Searched refs:vma (Results 1 - 25 of 272) sorted by relevance


/drivers/media/v4l2-core/
videobuf2-memops.c
27 * @vma: given virtual memory area
36 struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma) argument
44 if (vma->vm_ops && vma->vm_ops->open)
45 vma->vm_ops->open(vma);
47 if (vma->vm_file)
48 get_file(vma->vm_file);
50 memcpy(vma_copy, vma, sizeof(*vma));
67 vb2_put_vma(struct vm_area_struct *vma) argument
100 struct vm_area_struct *vma; local
146 vb2_common_vm_open(struct vm_area_struct *vma) argument
164 vb2_common_vm_close(struct vm_area_struct *vma) argument
[all...]
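
The vb2_get_vma() snippet above shows the canonical pattern for duplicating a vm_area_struct while taking the references the copy logically owns. A minimal sketch of that acquire idea, assuming a kernel of this tree's era (the cleared linkage fields mirror the upstream helper; the kzalloc and error handling here are illustrative):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/fs.h>

    /* Duplicate @vma, notifying its owner and pinning its backing file. */
    static struct vm_area_struct *vma_acquire_copy(struct vm_area_struct *vma)
    {
        struct vm_area_struct *vma_copy;

        vma_copy = kzalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
            return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
            vma->vm_ops->open(vma);     /* a new user of the mapping exists */

        if (vma->vm_file)
            get_file(vma->vm_file);     /* keep the backing file alive */

        memcpy(vma_copy, vma, sizeof(*vma));

        /* The copy lives outside any mm's VMA tree. */
        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;
        return vma_copy;
    }
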
videobuf-dma-contig.c
66 static void videobuf_vm_open(struct vm_area_struct *vma) argument
68 struct videobuf_mapping *map = vma->vm_private_data;
70 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
71 map, map->count, vma->vm_start, vma->vm_end);
76 static void videobuf_vm_close(struct vm_area_struct *vma) argument
78 struct videobuf_mapping *map = vma->vm_private_data;
82 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
83 map, map->count, vma->vm_start, vma->vm_end);
164 struct vm_area_struct *vma; local
276 __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) argument
[all...]
videobuf-vmalloc.c
54 static void videobuf_vm_open(struct vm_area_struct *vma) argument
56 struct videobuf_mapping *map = vma->vm_private_data;
58 dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
59 map->count, vma->vm_start, vma->vm_end);
64 static void videobuf_vm_close(struct vm_area_struct *vma) argument
66 struct videobuf_mapping *map = vma->vm_private_data;
70 dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
71 map->count, vma->vm_start, vma
232 __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) argument
[all...]
videobuf2-dma-sg.c
43 struct vm_area_struct *vma; member in struct:vb2_dma_sg_buf
159 static inline int vma_is_io(struct vm_area_struct *vma) argument
161 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
170 struct vm_area_struct *vma; local
190 vma = find_vma(current->mm, vaddr);
191 if (!vma) {
192 dprintk(1, "no vma for address %lu\n", vaddr);
196 if (vma->vm_end < vaddr + size) {
197 dprintk(1, "vma at %lu is too small for %lu bytes\n",
202 buf->vma
301 vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma) argument
[all...]
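
The videobuf2-dma-sg userptr path above (lines 190–197 of the file) validates that a user buffer lies entirely inside one VMA before pinning it. A minimal sketch of that check, assuming the caller holds current->mm's mmap_sem for reading as was the convention in this tree; note find_vma() only guarantees vm_end > vaddr, so the start address must be checked separately:

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    /* Return 0 if [vaddr, vaddr + size) sits inside a single VMA. */
    static int userptr_range_ok(unsigned long vaddr, unsigned long size)
    {
        struct vm_area_struct *vma = find_vma(current->mm, vaddr);

        if (!vma || vma->vm_start > vaddr)
            return -EFAULT;             /* no VMA covers the start */
        if (vma->vm_end < vaddr + size)
            return -EFAULT;             /* range runs past the VMA */
        return 0;
    }
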
videobuf2-vmalloc.c
27 struct vm_area_struct *vma; member in struct:vb2_vmalloc_buf
78 struct vm_area_struct *vma; local
90 vma = find_vma(current->mm, vaddr);
91 if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
92 if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
94 buf->vma = vma;
153 if (buf->vma)
179 vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma) argument
[all...]
/drivers/gpu/drm/
drm_vm.c
48 struct vm_area_struct *vma; member in struct:drm_vma_entry
52 static void drm_vm_open(struct vm_area_struct *vma);
53 static void drm_vm_close(struct vm_area_struct *vma);
56 struct vm_area_struct *vma)
58 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
66 if (efi_range_is_wc(vma->vm_start, vma->vm_end -
67 vma->vm_start))
77 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) argument
79 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
55 drm_io_prot(struct drm_local_map *map, struct vm_area_struct *vma) argument
98 drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
171 drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
187 drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
217 drm_vm_shm_close(struct vm_area_struct *vma) argument
289 drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
323 drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
349 drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
354 drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
359 drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
364 drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
405 drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma) argument
421 drm_vm_open(struct vm_area_struct *vma) argument
431 drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma) argument
456 drm_vm_close(struct vm_area_struct *vma) argument
476 drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) argument
539 drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) argument
653 drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma) argument
672 struct drm_vma_entry *vma, *vma_temp; local
686 struct vm_area_struct *vma; local
[all...]
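
drm_io_prot() above picks page protections for an I/O mapping: start from the generic protection for the VMA's flags, then switch the range to write-combining where the platform allows it. A sketch of that selection, assuming efi_range_is_wc() as used in the snippet (a local helper in drm_vm.c, not a generic kernel API); the real function has more per-architecture branches:

    #include <linux/mm.h>

    static pgprot_t pick_io_prot(struct vm_area_struct *vma)
    {
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

    #if defined(__ia64__)
        /* On ia64, honour the firmware's write-combine attribute. */
        if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
            tmp = pgprot_writecombine(tmp);
        else
            tmp = pgprot_noncached(tmp);
    #endif
        return tmp;
    }
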
/drivers/infiniband/hw/ehca/
ehca_uverbs.c
71 static void ehca_mm_open(struct vm_area_struct *vma) argument
73 u32 *count = (u32 *)vma->vm_private_data;
75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
76 vma->vm_start, vma->vm_end);
82 vma->vm_start, vma->vm_end);
84 vma->vm_start, vma->vm_end, *count);
87 static void ehca_mm_close(struct vm_area_struct *vma) argument
105 ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas, u32 *mm_count) argument
135 ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue, u32 *mm_count) argument
161 ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq, u32 rsrc_type) argument
198 ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, u32 rsrc_type) argument
248 ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) argument
[all...]
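
ehca_mm_open() and ehca_mm_close() above keep a use count behind vma->vm_private_data so that forked or split mappings are tracked. A bare-bones sketch of that wiring with the plain u32 counter the snippet shows; the pr_err() stands in for ehca_gen_err():

    #include <linux/mm.h>
    #include <linux/types.h>
    #include <linux/printk.h>

    static void counted_vm_open(struct vm_area_struct *vma)
    {
        u32 *count = vma->vm_private_data;

        if (!count) {
            pr_err("invalid vma: vm_start=%lx vm_end=%lx\n",
                   vma->vm_start, vma->vm_end);
            return;
        }
        (*count)++;
    }

    static void counted_vm_close(struct vm_area_struct *vma)
    {
        u32 *count = vma->vm_private_data;

        if (count)
            (*count)--;
    }

    static const struct vm_operations_struct counted_vm_ops = {
        .open  = counted_vm_open,
        .close = counted_vm_close,
    };
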
/drivers/gpu/drm/i915/
i915_gem_evict.c
37 mark_free(struct i915_vma *vma, struct list_head *unwind) argument
39 if (vma->pin_count)
42 if (WARN_ON(!list_empty(&vma->exec_list)))
45 list_add(&vma->exec_list, unwind);
46 return drm_mm_scan_add_block(&vma->node);
63 * This function is used by the object/vma binding code.
75 struct i915_vma *vma; local
114 list_for_each_entry(vma, &vm->inactive_list, mm_list) {
115 if (mark_free(vma, &unwind_list))
123 list_for_each_entry(vma,
214 struct i915_vma *vma, *next; local
[all...]
i915_gem_execbuffer.c
131 struct i915_vma *vma; local
141 * lookup_or_create exists as an interface to get at the vma
145 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
146 if (IS_ERR(vma)) {
148 ret = PTR_ERR(vma);
153 list_add_tail(&vma->exec_list, &eb->vmas);
156 vma->exec_entry = &exec[i];
158 eb->lut[i] = vma;
161 vma->exec_handle = handle;
162 hlist_add_head(&vma
199 struct i915_vma *vma; local
210 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) argument
232 struct i915_vma *vma; local
361 struct i915_vma *vma = local
436 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, struct eb_vmas *eb) argument
482 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, struct eb_vmas *eb, struct drm_i915_gem_relocation_entry *relocs) argument
501 struct i915_vma *vma; local
523 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, struct intel_engine_cs *ring, bool *need_reloc) argument
569 need_reloc_mappable(struct i915_vma *vma) argument
590 eb_vma_misplaced(struct i915_vma *vma) argument
618 struct i915_vma *vma; local
718 struct i915_vma *vma; local
828 struct i915_vma *vma; local
950 struct i915_vma *vma; local
1217 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); local
[all...]
/drivers/gpu/drm/ttm/
ttm_bo_vm.c
45 struct vm_area_struct *vma,
69 up_read(&vma->vm_mm->mmap_sem);
86 static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
89 vma->vm_private_data;
117 up_read(&vma->vm_mm->mmap_sem);
160 ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
177 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
178 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
179 page_last = vma_pages(vma) + vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
44 ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct vm_area_struct *vma, struct vm_fault *vmf) argument
260 ttm_bo_vm_open(struct vm_area_struct *vma) argument
270 ttm_bo_vm_close(struct vm_area_struct *vma) argument
308 ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_bo_device *bdev) argument
353 ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) argument
[all...]
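
The fault handler above turns a faulting user address into a page index inside the buffer object: the page distance from vm_start, plus the mmap offset the VMA was created with, minus where the object sits in the device's mmap offset space. A sketch of just that arithmetic, with made-up numbers in the comment:

    #include <linux/mm.h>

    /* e.g. address - vm_start = 0x3000 gives page 3 within the VMA;
     * if vm_pgoff - node_start = 2 (userspace mapped starting at the
     * object's third page), the faulting page in the object is 5. */
    static unsigned long bo_page_offset(unsigned long address,
                                        struct vm_area_struct *vma,
                                        unsigned long node_start)
    {
        return ((address - vma->vm_start) >> PAGE_SHIFT) +
               vma->vm_pgoff - node_start;
    }
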
/drivers/xen/
privcmd.c
47 struct vm_area_struct *vma,
162 struct vm_area_struct *vma; member in struct:mmap_mfn_state
170 struct vm_area_struct *vma = st->vma; local
180 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
183 rc = xen_remap_domain_mfn_range(vma,
186 vma->vm_page_prot,
200 struct vm_area_struct *vma; local
226 vma = find_vma(mm, msg->va);
229 if (!vma || (ms
255 struct vm_area_struct *vma; member in struct:mmap_batch_state
279 struct vm_area_struct *vma = st->vma; local
346 alloc_empty_pages(struct vm_area_struct *vma, int numpgs) argument
375 struct vm_area_struct *vma; local
532 privcmd_close(struct vm_area_struct *vma) argument
550 privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
564 privcmd_mmap(struct file *file, struct vm_area_struct *vma) argument
587 privcmd_vma_range_is_mapped( struct vm_area_struct *vma, unsigned long addr, unsigned long nr_pages) argument
[all...]
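
In the privcmd traversal above, each mmap message is checked against the VMA before xen_remap_domain_mfn_range() populates it. A sketch of that bounds check, mirroring the comparison on line 180 of the file; the symmetric check of the start address is an addition for completeness:

    #include <linux/mm.h>
    #include <linux/errno.h>

    /* Does the requested window of npages at va fit inside @vma? */
    static int mfn_window_fits(struct vm_area_struct *vma,
                               unsigned long va, unsigned long npages)
    {
        if (va < vma->vm_start ||
            va + (npages << PAGE_SHIFT) > vma->vm_end)
            return -EINVAL;
        return 0;
    }
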
/drivers/xen/xenfs/
xenstored.c
33 static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma) argument
35 size_t size = vma->vm_end - vma->vm_start;
37 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
40 if (remap_pfn_range(vma, vma->vm_start,
42 size, vma->vm_page_prot))
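
xsd_kva_mmap() above is already nearly complete: it is the standard shape of an mmap handler that exposes exactly one page. A self-contained version of the same pattern, with the page-frame number parameterized (the real handler derives it from the xenstore interface page):

    #include <linux/mm.h>
    #include <linux/fs.h>
    #include <linux/errno.h>

    static int one_page_mmap(struct file *file, struct vm_area_struct *vma,
                             unsigned long pfn)
    {
        size_t size = vma->vm_end - vma->vm_start;

        /* Refuse anything other than one page mapped at offset 0. */
        if (size > PAGE_SIZE || vma->vm_pgoff != 0)
            return -EINVAL;

        if (remap_pfn_range(vma, vma->vm_start, pfn, size,
                            vma->vm_page_prot))
            return -EAGAIN;

        return 0;
    }
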
/drivers/gpu/drm/exynos/
exynos_drm_gem.h
62 * @vma: a pointer to vm_area.
72 struct vm_area_struct *vma; member in struct:exynos_drm_gem_obj
143 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
146 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
148 static inline int vma_is_io(struct vm_area_struct *vma) argument
150 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
154 struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
157 void exynos_gem_put_vma(struct vm_area_struct *vma);
163 struct vm_area_struct *vma);
168 struct vm_area_struct *vma);
[all...]
exynos_drm_gem.c
57 struct vm_area_struct *vma)
63 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65 vma->vm_page_prot =
66 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
68 vma->vm_page_prot =
69 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
80 struct vm_area_struct *vma,
107 return vm_insert_mixed(vma, f_vaddr, pfn);
322 struct vm_area_struct *vma)
56 update_vm_cache_attr(struct exynos_drm_gem_obj *obj, struct vm_area_struct *vma) argument
79 exynos_drm_gem_map_buf(struct drm_gem_object *obj, struct vm_area_struct *vma, unsigned long f_vaddr, pgoff_t page_offset) argument
321 exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) argument
381 exynos_gem_get_vma(struct vm_area_struct *vma) argument
404 exynos_gem_put_vma(struct vm_area_struct *vma) argument
418 exynos_gem_get_pages_from_userptr(unsigned long start, unsigned int npages, struct page **pages, struct vm_area_struct *vma) argument
459 exynos_gem_put_pages_to_userptr(struct page **pages, unsigned int npages, struct vm_area_struct *vma) argument
595 exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
618 exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) argument
[all...]
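
update_vm_cache_attr() above selects the mapping's cache attribute from the buffer's flags: fully cached, write-combined, or non-cached. A sketch of that three-way choice, using the EXYNOS_BO_* flag names from the driver's uapi header (the if/else framing here is illustrative):

    #include <linux/mm.h>
    #include <drm/exynos_drm.h>

    static void set_vma_cache_attr(struct vm_area_struct *vma,
                                   unsigned int bo_flags)
    {
        if (bo_flags & EXYNOS_BO_CACHABLE)
            vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (bo_flags & EXYNOS_BO_WC)
            vma->vm_page_prot =
                pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
            vma->vm_page_prot =
                pgprot_noncached(vm_get_page_prot(vma->vm_flags));
    }
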
/drivers/staging/lustre/lustre/llite/
llite_mmap.c
57 struct vm_area_struct *vma, unsigned long addr,
60 policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
61 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
69 struct vm_area_struct *vma, *ret = NULL; local
74 for (vma = find_vma(mm, addr);
75 vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
76 if (vma
56 policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma, unsigned long addr, size_t count) argument
98 ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, struct cl_env_nest *nest, pgoff_t index, unsigned long *ra_flags) argument
169 ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage, bool *retry) argument
294 ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) argument
343 ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
384 ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) argument
431 ll_vm_open(struct vm_area_struct *vma) argument
444 ll_vm_close(struct vm_area_struct *vma) argument
477 ll_file_mmap(struct file *file, struct vm_area_struct *vma) argument
[all...]
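
policy_from_vma() above converts a user address inside a mapped file region into the file extent that needs a lock: the byte distance from vm_start, page-aligned, plus the file offset the VMA was mapped at. A sketch of the conversion using the generic PAGE_MASK/PAGE_SHIFT names (CFS_PAGE_MASK and PAGE_CACHE_SHIFT in the Lustre source of this era are aliases of the same values):

    #include <linux/mm.h>

    static loff_t vma_addr_to_file_off(struct vm_area_struct *vma,
                                       unsigned long addr)
    {
        return (loff_t)((addr - vma->vm_start) & PAGE_MASK) +
               ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
    }
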
/drivers/char/
uv_mmtimer.c
43 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma);
142 * @vma: VMA to map the registers into
147 static int uv_mmtimer_mmap(struct file *file, struct vm_area_struct *vma) argument
151 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
154 if (vma->vm_flags & VM_WRITE)
160 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
166 if (remap_pfn_range(vma, vma
[all...]
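
uv_mmtimer_mmap() above enforces a read-only, single-page, uncached mapping of the timer register. The same guard sequence in isolation, with the register's page-frame number as a stand-in parameter:

    #include <linux/mm.h>
    #include <linux/errno.h>

    static int ro_reg_page_mmap(struct vm_area_struct *vma, unsigned long pfn)
    {
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
            return -EINVAL;             /* exactly one page */
        if (vma->vm_flags & VM_WRITE)
            return -EPERM;              /* registers are read-only here */

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
                            vma->vm_page_prot))
            return -EAGAIN;
        return 0;
    }
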
mspec.c
83 * structure is pointed to by the vma->vm_private_data field in the vma struct.
85 * This structure is shared by all vma's that are split off from the
86 * original vma when split_vma()'s are done.
145 mspec_open(struct vm_area_struct *vma) argument
149 vdata = vma->vm_private_data;
157 * belonging to all the vma's sharing this vma_data structure.
160 mspec_close(struct vm_area_struct *vma) argument
166 vdata = vma->vm_private_data;
200 mspec_fault(struct vm_area_struct *vma, struc argument
255 mspec_mmap(struct file *file, struct vm_area_struct *vma, enum mspec_page_type type) argument
298 fetchop_mmap(struct file *file, struct vm_area_struct *vma) argument
304 cached_mmap(struct file *file, struct vm_area_struct *vma) argument
310 uncached_mmap(struct file *file, struct vm_area_struct *vma) argument
[all...]
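
The mspec comments above describe one structure shared by every VMA that split_vma() carves from the original mapping, freed when the last of them closes. A sketch of that lifetime with an atomic refcount (the real driver keeps extra bookkeeping; names here are hypothetical):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/atomic.h>

    struct shared_vdata {
        atomic_t refcnt;
        /* ... per-mapping driver state ... */
    };

    static void shared_vm_open(struct vm_area_struct *vma)
    {
        struct shared_vdata *vdata = vma->vm_private_data;

        atomic_inc(&vdata->refcnt);     /* split/fork adds a user */
    }

    static void shared_vm_close(struct vm_area_struct *vma)
    {
        struct shared_vdata *vdata = vma->vm_private_data;

        if (atomic_dec_and_test(&vdata->refcnt))
            kfree(vdata);               /* last mapping is gone */
    }
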
/drivers/gpu/drm/nouveau/core/subdev/vm/
base.c
32 nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node) argument
34 struct nouveau_vm *vm = vma->vm;
37 int big = vma->node->type != vmm->spg_shift;
38 u32 offset = vma->node->offset + (delta >> 12);
39 u32 bits = vma->node->type - 12;
58 vmm->map(vma, pgt, node, pte, len, phys, delta);
68 delta += (u64)len << vma->node->type;
76 nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, argument
79 struct nouveau_vm *vm = vma->vm;
81 int big = vma->node->type != vmm->spg_shift;
134 nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, struct nouveau_mem *mem) argument
172 nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node) argument
184 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) argument
219 nouveau_vm_unmap(struct nouveau_vma *vma) argument
289 nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, u32 access, struct nouveau_vma *vma) argument
337 nouveau_vm_put(struct nouveau_vma *vma) argument
[all...]
/drivers/infiniband/hw/ipath/
ipath_mmap.c
64 static void ipath_vma_open(struct vm_area_struct *vma) argument
66 struct ipath_mmap_info *ip = vma->vm_private_data;
71 static void ipath_vma_close(struct vm_area_struct *vma) argument
73 struct ipath_mmap_info *ip = vma->vm_private_data;
86 * @vma: the VMA to be initialized
89 int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
93 unsigned long size = vma->vm_end - vma->vm_start;
115 ret = remap_vmalloc_range(vma, ip->obj, 0);
[all...]
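
ipath_mmap() above resolves the mmap offset to a previously vmalloc()ed object and hands it to remap_vmalloc_range(). The pattern in isolation (the offset-to-object lookup is elided; obj stands for the vmalloc buffer and obj_size for its length):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <linux/errno.h>

    static int vmalloc_buf_mmap(struct vm_area_struct *vma,
                                void *obj, size_t obj_size)
    {
        unsigned long size = vma->vm_end - vma->vm_start;

        if (size > obj_size)
            return -EINVAL;

        /* Map the vmalloc area starting from its first page. */
        return remap_vmalloc_range(vma, obj, 0);
    }
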
/drivers/infiniband/hw/qib/
qib_mmap.c
64 static void qib_vma_open(struct vm_area_struct *vma) argument
66 struct qib_mmap_info *ip = vma->vm_private_data;
71 static void qib_vma_close(struct vm_area_struct *vma) argument
73 struct qib_mmap_info *ip = vma->vm_private_data;
86 * @vma: the VMA to be initialized
89 int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) argument
92 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
93 unsigned long size = vma->vm_end - vma->vm_start;
115 ret = remap_vmalloc_range(vma, ip->obj, 0);
[all...]
/drivers/sbus/char/
flash.c
36 flash_mmap(struct file *file, struct vm_area_struct *vma) argument
46 if ((vma->vm_flags & VM_READ) &&
47 (vma->vm_flags & VM_WRITE)) {
51 if (vma->vm_flags & VM_READ) {
54 } else if (vma->vm_flags & VM_WRITE) {
64 if ((vma->vm_pgoff << PAGE_SHIFT) > size)
66 addr = vma->vm_pgoff + (addr >> PAGE_SHIFT);
68 if (vma->vm_end - (vma->vm_start + (vma
[all...]
/drivers/xen/xenbus/
xenbus_dev_backend.c
93 static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma) argument
95 size_t size = vma->vm_end - vma->vm_start;
100 if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
103 if (remap_pfn_range(vma, vma->vm_start,
105 size, vma->vm_page_prot))
/drivers/misc/cxl/
context.c
101 * Map a per-context mmio space into the given vma.
103 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) argument
105 u64 len = vma->vm_end - vma->vm_start;
109 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
110 return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
126 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
127 return vm_iomap_memory(vma, ct
[all...]
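
cxl_context_iomap() above maps the per-context problem-state area with a single call: mark the VMA uncached and let vm_iomap_memory() validate vm_pgoff and the length against the physical window. The shape of that helper use, with the window parameterized:

    #include <linux/mm.h>

    static int iomap_window(struct vm_area_struct *vma,
                            phys_addr_t start, unsigned long window_len)
    {
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return vm_iomap_memory(vma, start, window_len);
    }
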
/drivers/misc/mic/host/
mic_fops.c
192 mic_mmap(struct file *f, struct vm_area_struct *vma) argument
195 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
196 unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
203 if (vma->vm_flags & VM_WRITE)
210 err = remap_pfn_range(vma, vma->vm_start + offset,
211 pa >> PAGE_SHIFT, size, vma->vm_page_prot);
215 "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n",
217 pa, vma
[all...]
/drivers/staging/unisys/visorchipset/
file.c
38 static int visorchipset_mmap(struct file *file, struct vm_area_struct *vma);
138 visorchipset_mmap(struct file *file, struct vm_area_struct *vma) argument
141 ulong offset = vma->vm_pgoff << PAGE_SHIFT;
152 vma->vm_flags |= VM_IO;
167 if (remap_pfn_range(vma, vma->vm_start,
169 vma->vm_end - vma->vm_start,
171 (vma->vm_page_prot))) {

Completed in 4589 milliseconds
