Searched defs:mm (Results 1 - 25 of 25) sorted by relevance

/mm/
mmu_context.c
6 #include <linux/mm.h>
16 * mm context.
18 * iocb issuer's mm context, so that copy_from/to_user
23 void use_mm(struct mm_struct *mm) argument
30 if (active_mm != mm) {
31 atomic_inc(&mm->mm_count);
32 tsk->active_mm = mm;
34 tsk->mm = mm;
35 switch_mm(active_mm, mm, ts
51 unuse_mm(struct mm_struct *mm) argument
[all...]
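The hits above show the use_mm()/unuse_mm() pair that lets a kernel thread temporarily adopt another task's mm, so copy_from_user()/copy_to_user() resolve against the iocb issuer's address space. A minimal sketch of a caller, assuming the 3.x-era <linux/mmu_context.h> API shown in the hits; kthread_poke_user() is a hypothetical name:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/mmu_context.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper: write one int into a user address belonging
     * to the mm we were handed (e.g. the aio submitter's mm). */
    static void kthread_poke_user(struct mm_struct *mm,
                                  int __user *uaddr, int val)
    {
            use_mm(mm);             /* adopt mm: bumps mm_count, switch_mm() */
            if (copy_to_user(uaddr, &val, sizeof(val)))
                    pr_warn("target page not writable\n");
            unuse_mm(mm);           /* detach; only active_mm reference kept */
    }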
msync.c
2 * linux/mm/msync.c
11 #include <linux/mm.h>
34 struct mm_struct *mm = current->mm; local
57 down_read(&mm->mmap_sem);
58 vma = find_vma(mm, start);
84 up_read(&mm->mmap_sem);
89 down_read(&mm->mmap_sem);
90 vma = find_vma(mm, start);
100 up_read(&mm
[all...]
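The msync.c hits trace the syscall's VMA walk: take mmap_sem for reading, find_vma() for each chunk, and drop the lock around writeback. From user space the same path is exercised like this (a minimal, standalone sketch; demo.dat is an arbitrary file name):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("demo.dat", O_RDWR | O_CREAT, 0644);
            if (fd < 0 || ftruncate(fd, 4096) < 0)
                    return 1;
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            memcpy(p, "hello", 5);          /* dirty the shared mapping */
            if (msync(p, 4096, MS_SYNC))    /* kernel walks the VMAs as above */
                    perror("msync");
            munmap(p, 4096);
            close(fd);
            return 0;
    }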
thrash.c
2 * mm/thrash.c
21 #include <linux/mm.h>
35 static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm) argument
39 memcg = try_get_mem_cgroup_from_mm(mm);
46 static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm) argument
52 void grab_swap_token(struct mm_struct *mm) argument
55 unsigned int old_prio = mm->token_priority;
61 current_interval = global_faults - mm->faultstamp;
86 if (mm == swap_token_mm) {
87 mm
121 __put_swap_token(struct mm_struct *mm) argument
[all...]
fremap.c
2 * linux/mm/fremap.c
9 #include <linux/mm.h>
25 static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, argument
41 update_hiwater_rss(mm);
42 dec_mm_counter(mm, MM_FILEPAGES);
47 pte_clear_not_present_full(mm, addr, ptep, 0);
55 static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, argument
62 pte = get_locked_pte(mm, addr, &ptl);
67 zap_pte(mm, vma, addr, pte);
69 set_pte_at(mm, add
83 populate_range(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long size, pgoff_t pgoff) argument
125 struct mm_struct *mm = current->mm; local
[all...]
mmu_notifier.c
2 * linux/mm/mmu_notifier.c
15 #include <linux/mm.h>
23 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
25 * in parallel despite there being no task using this mm any more,
33 void __mmu_notifier_release(struct mm_struct *mm) argument
37 spin_lock(&mm->mmu_notifier_mm->lock);
38 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
39 mn = hlist_entry(mm->mmu_notifier_mm->list.first,
54 spin_unlock(&mm->mmu_notifier_mm->lock);
60 * pages in the mm ar
86 __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long address) argument
103 __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address) argument
123 __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte) argument
143 __mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address) argument
157 __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end) argument
171 __mmu_notifier_invalidate_range_end(struct mm_struct *mm, unsigned long start, unsigned long end) argument
185 do_mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm, int take_mmap_sem) argument
249 mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) argument
259 __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm) argument
266 __mmu_notifier_mm_destroy(struct mm_struct *mm) argument
283 mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) argument
[all...]
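These hits are the notifier core: __mmu_notifier_release() tears down the hlist under mmu_notifier_mm->lock, and the __mmu_notifier_invalidate_* helpers fan each event out to registered listeners. A hedged sketch of the consumer side, using the registration API shown above (the demo_* names are hypothetical):

    #include <linux/mm.h>
    #include <linux/mmu_notifier.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    static void demo_invalidate_page(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long address)
    {
            pr_info("page at %#lx unmapped from mm %p\n", address, mm);
    }

    static const struct mmu_notifier_ops demo_ops = {
            .invalidate_page = demo_invalidate_page,
    };

    static struct mmu_notifier demo_mn = { .ops = &demo_ops };

    static int demo_attach_to_current(void)
    {
            /* takes mmap_sem and allocates mm->mmu_notifier_mm on first use */
            return mmu_notifier_register(&demo_mn, current->mm);
    }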
madvise.c
2 * linux/mm/madvise.c
43 struct mm_struct * mm = vma->vm_mm; local
88 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
98 error = split_vma(mm, vma, start, 1);
104 error = split_vma(mm, vma, end, 0);
222 up_read(&current->mm->mmap_sem);
224 down_read(&current->mm->mmap_sem);
363 down_write(&current->mm->mmap_sem);
365 down_read(&current->mm->mmap_sem);
388 vma = find_vma_prev(current->mm, star
[all...]
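madvise() either merges the new advice into a neighboring VMA via vma_merge() or carves the range out with split_vma(), as the hits show. A minimal user-space trigger for that split path (MADV_RANDOM changes vm_flags, so advising only the middle of a mapping should force two splits):

    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED)
                    return 1;
            /* advising only the middle page splits the VMA on both sides */
            if (madvise(buf + 4096, 4096, MADV_RANDOM))
                    return 1;
            munmap(buf, len);
            return 0;
    }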
mprotect.c
2 * mm/mprotect.c
11 #include <linux/mm.h>
38 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd, argument
45 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
52 ptent = ptep_modify_prot_start(mm, addr, pte);
62 ptep_modify_prot_commit(mm, addr, pte, ptent);
72 set_pte_at(mm, addr, pte,
126 struct mm_struct *mm = vma->vm_mm; local
132 pgd = pgd_offset(mm, addr);
148 struct mm_struct *mm local
[all...]
filemap_xip.c
2 * linux/mm/filemap_xip.c
7 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
169 struct mm_struct *mm; local
188 mm = vma->vm_mm;
192 pte = page_check_address(page, mm, address, &ptl, 1);
198 dec_mm_counter(mm, MM_FILEPAGES);
mlock.c
2 * linux/mm/mlock.c
10 #include <linux/mm.h>
48 * (see mm/rmap.c).
162 struct mm_struct *mm = vma->vm_mm; local
171 VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
189 return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
232 vma == get_gate_vma(current->mm))) {
320 struct mm_struct *mm = vma->vm_mm; local
327 is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
331 *prev = vma_merge(mm, *pre
427 struct mm_struct *mm = current->mm; local
[all...]
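mlock_fixup() (the hit around line 320) follows the same merge-or-split pattern, then faults the pages in through __get_user_pages() with mmap_sem held, as the VM_BUG_ON above asserts. Exercised from user space (may require CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            static char buf[4 * 4096];
            if (mlock(buf, sizeof(buf))) {  /* faults pages in, sets VM_LOCKED */
                    perror("mlock");
                    return 1;
            }
            munlock(buf, sizeof(buf));
            return 0;
    }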
mremap.c
2 * mm/mremap.c
10 #include <linux/mm.h>
29 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) argument
35 pgd = pgd_offset(mm, addr);
50 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, argument
57 pgd = pgd_offset(mm, addr);
58 pud = pud_alloc(mm, pgd, addr);
62 pmd = pmd_alloc(mm, pud, addr);
77 struct mm_struct *mm = vma->vm_mm; local
96 old_pte = pte_offset_map_lock(mm, old_pm
187 struct mm_struct *mm = vma->vm_mm; local
293 struct mm_struct *mm = current->mm; local
353 struct mm_struct *mm = current->mm; local
439 struct mm_struct *mm = current->mm; local
[all...]
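mremap() moves page tables one PMD at a time: get_old_pmd() walks to the source entry, alloc_new_pmd() builds the destination path with pud_alloc()/pmd_alloc(), and the PTEs move under pte_offset_map_lock(). The user-visible side, as a small sketch:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            p[0] = 42;
            /* grow to 8 KiB; the kernel may relocate the mapping */
            char *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
            if (q == MAP_FAILED) {
                    perror("mremap");
                    return 1;
            }
            printf("moved %p -> %p, byte = %d\n", (void *)p, (void *)q, q[0]);
            munmap(q, 8192);
            return 0;
    }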
process_vm_access.c
2 * linux/mm/process_vm_access.c
12 #include <linux/mm.h>
27 * @mm: mm for task
43 struct mm_struct *mm,
67 down_read(&mm->mmap_sem);
68 pages_pinned = get_user_pages(task, mm, pa,
71 up_read(&mm->mmap_sem);
164 * @mm: mm fo
42 process_vm_rw_pages(struct task_struct *task, struct mm_struct *mm, struct page **process_pages, unsigned long pa, unsigned long start_offset, unsigned long len, const struct iovec *lvec, unsigned long lvec_cnt, unsigned long *lvec_current, size_t *lvec_offset, int vm_write, unsigned int nr_pages_to_copy, ssize_t *bytes_copied) argument
170 process_vm_rw_single_vec(unsigned long addr, unsigned long len, const struct iovec *lvec, unsigned long lvec_cnt, unsigned long *lvec_current, size_t *lvec_offset, struct page **process_pages, struct mm_struct *mm, struct task_struct *task, int vm_write, ssize_t *bytes_copied) argument
250 struct mm_struct *mm; local
[all...]
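This file implements process_vm_readv()/process_vm_writev(): pin the remote task's pages with get_user_pages() under its mmap_sem, then copy directly with no kernel-side bounce buffer. Reading memory from user space (assumes glibc >= 2.15 and ptrace-level permission on the target pid; here the process reads itself):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Read `len` bytes at `remote_addr` inside process `pid`. */
    static ssize_t peek(pid_t pid, void *remote_addr, void *out, size_t len)
    {
            struct iovec local  = { .iov_base = out,         .iov_len = len };
            struct iovec remote = { .iov_base = remote_addr, .iov_len = len };
            return process_vm_readv(pid, &local, 1, &remote, 1, 0);
    }

    int main(void)
    {
            int secret = 1234, copy = 0;
            if (peek(getpid(), &secret, &copy, sizeof(copy)) < 0) {
                    perror("process_vm_readv");
                    return 1;
            }
            printf("read back %d\n", copy);
            return 0;
    }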
util.c
1 #include <linux/mm.h>
220 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, argument
230 mm->mmap = vma;
243 void arch_pick_mmap_layout(struct mm_struct *mm) argument
245 mm->mmap_base = TASK_UNMAPPED_BASE;
246 mm->get_unmapped_area = arch_get_unmapped_area;
247 mm->unmap_area = arch_unmap_area;
277 * operating on current and current->mm, with force=0 and vma=NULL. However
291 struct mm_struct *mm = current->mm; local
[all...]
oom_kill.c
2 * linux/mm/oom_kill.c
12 * in mm/page_alloc.c when we really run out of memory.
21 #include <linux/mm.h>
134 * The process p may have detached its own ->mm while exiting or through
136 * pointer. Return p, or any of its subthreads with a valid ->mm, with
145 if (likely(t->mm))
210 points = get_mm_rss(p->mm) + p->mm->nr_ptes;
211 points += get_mm_counter(p->mm, MM_SWAPENTS);
340 if (!p->mm)
440 struct mm_struct *mm; local
[all...]
migrate.c
2 * Memory Migration functionality - linux/mm/migration.c
91 struct mm_struct *mm = vma->vm_mm; local
100 ptep = huge_pte_offset(mm, addr);
103 ptl = &mm->page_table_lock;
105 pgd = pgd_offset(mm, addr);
126 ptl = pte_lockptr(mm, pmd);
149 set_pte_at(mm, addr, ptep, pte);
183 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, argument
191 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
1096 static int do_move_page_to_node_array(struct mm_struct *mm, argument
1179 do_pages_move(struct mm_struct *mm, struct task_struct *task, unsigned long nr_pages, const void __user * __user *pages, const int __user *nodes, int __user *status, int flags) argument
1268 do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, const void __user **pages, int *status) argument
1311 do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, const void __user * __user *pages, int __user *status) argument
1352 struct mm_struct *mm; local
1413 migrate_vmas(struct mm_struct *mm, const nodemask_t *to, const nodemask_t *from, unsigned long flags) argument
[all...]
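do_pages_move()/do_pages_stat() back the move_pages(2) syscall: resolve each user pointer to a page, isolate it, and migrate it to the requested node. The libnuma wrapper makes the same call (link with -lnuma; node 0 is an arbitrary choice):

    #include <numaif.h>             /* move_pages(), from libnuma */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (page == MAP_FAILED)
                    return 1;
            *(char *)page = 1;      /* fault the page in first */

            void *pages[1]  = { page };
            int   nodes[1]  = { 0 };        /* ask for NUMA node 0 */
            int   status[1];
            if (move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE))
                    perror("move_pages");
            else
                    printf("page now on node %d\n", status[0]);
            return 0;
    }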
rmap.c
2 * mm/rmap.c - physical to virtual reverse mappings
21 * Lock ordering in mm:
24 * mm->mmap_sem
28 * mm->page_table_lock or pte_lock
45 #include <linux/mm.h>
157 struct mm_struct *mm = vma->vm_mm; local
175 spin_lock(&mm->page_table_lock);
185 spin_unlock(&mm->page_table_lock);
610 * Check that @page is mapped at @address into @mm.
618 pte_t *__page_check_address(struct page *page, struct mm_struct *mm, argument
699 struct mm_struct *mm = vma->vm_mm; local
928 struct mm_struct *mm = vma->vm_mm; local
1217 struct mm_struct *mm = vma->vm_mm; local
1364 struct mm_struct *mm = vma->vm_mm; local
[all...]
filemap.c
2 * linux/mm/filemap.c
20 #include <linux/mm.h>
452 error = mem_cgroup_cache_charge(page, current->mm,
635 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, argument
646 up_read(&mm->mmap_sem);
658 up_read(&mm->mmap_sem);
1756 /* Things didn't work out. Return zero to tell the mm layer so. */
huge_memory.c
8 #include <linux/mm.h>
69 * struct mm_slot - hash lookup from mm to mm_slot
72 * @mm: the mm that this information is valid for
77 struct mm_struct *mm; member in struct:mm_slot
82 * @mm_head: the head of the mm list to scan
615 struct mm_struct *mm)
617 assert_spin_locked(&mm->page_table_lock);
620 if (!mm->pmd_huge_pte)
623 list_add(&pgtable->lru, &mm
614 prepare_pmd_huge_pte(pgtable_t pgtable, struct mm_struct *mm) argument
634 __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *page) argument
702 do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) argument
802 get_pmd_huge_pte(struct mm_struct *mm) argument
820 do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr) argument
914 do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd) argument
992 follow_trans_huge_pmd(struct mm_struct *mm, unsigned long addr, pmd_t *pmd, unsigned int flags) argument
1094 struct mm_struct *mm = vma->vm_mm; local
1134 struct mm_struct *mm = vma->vm_mm; local
1157 page_check_address_pmd(struct page *page, struct mm_struct *mm, unsigned long address, enum page_check_address_pmd_flag flag) argument
1205 struct mm_struct *mm = vma->vm_mm; local
1347 struct mm_struct *mm = vma->vm_mm; local
1579 get_mm_slot(struct mm_struct *mm) argument
1594 insert_to_mm_slots_hash(struct mm_struct *mm, struct mm_slot *mm_slot) argument
1605 khugepaged_test_exit(struct mm_struct *mm) argument
1610 __khugepaged_enter(struct mm_struct *mm) argument
1667 __khugepaged_exit(struct mm_struct *mm) argument
1834 collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, int node) argument
2010 khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, struct page **hpage) argument
2084 struct mm_struct *mm = mm_slot->mm; local
2111 struct mm_struct *mm; variable in typeref:struct:mm_struct
2377 __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd) argument
2397 split_huge_page_address(struct mm_struct *mm, unsigned long address) argument
[all...]
ksm.c
18 #include <linux/mm.h>
85 * struct mm_slot - ksm information per mm that is being scanned
89 * @mm: the mm that this information is valid for
95 struct mm_struct *mm; member in struct:mm_slot
129 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
130 * @mm: the memory structure this rmap_item is pointing into
140 struct mm_struct *mm; member in struct:rmap_item
251 rmap_item->mm = NULL; /* debug safety */
277 static struct mm_slot *get_mm_slot(struct mm_struct *mm) argument
291 insert_to_mm_slots_hash(struct mm_struct *mm, struct mm_slot *mm_slot) argument
314 ksm_test_exit(struct mm_struct *mm) argument
380 struct mm_struct *mm = rmap_item->mm; local
419 struct mm_struct *mm = rmap_item->mm; local
617 struct mm_struct *mm; local
703 struct mm_struct *mm = vma->vm_mm; local
768 struct mm_struct *mm = vma->vm_mm; local
924 struct mm_struct *mm = rmap_item->mm; local
1278 struct mm_struct *mm; local
1459 struct mm_struct *mm = vma->vm_mm; local
1499 __ksm_enter(struct mm_struct *mm) argument
1530 __ksm_exit(struct mm_struct *mm) argument
[all...]
memory.c
2 * linux/mm/memory.c
42 #include <linux/mm.h>
128 static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) argument
134 add_mm_counter(mm, i, task->rss_stat.count[i]);
141 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val) argument
145 if (likely(task->mm == mm))
148 add_mm_counter(mm, member, val);
150 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, membe
163 get_mm_counter(struct mm_struct *mm, int member) argument
181 sync_mm_rss(struct task_struct *task, struct mm_struct *mm) argument
227 tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) argument
595 __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, unsigned long address) argument
659 add_mm_rss_vec(struct mm_struct *mm, int *rss) argument
1121 struct mm_struct *mm = tlb->mm; local
1340 struct mm_struct *mm = vma->vm_mm; local
1395 struct mm_struct *mm = vma->vm_mm; local
1452 struct mm_struct *mm = vma->vm_mm; local
1643 __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) argument
1854 fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags) argument
1934 get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) argument
1981 __get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) argument
2006 struct mm_struct *mm = vma->vm_mm; local
2075 struct mm_struct *mm = vma->vm_mm; local
2178 remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
2199 remap_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
2220 remap_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
2256 struct mm_struct *mm = vma->vm_mm; local
2315 apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
2349 apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
2371 apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
2395 apply_to_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data) argument
2425 pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, pte_t *page_table, pte_t orig_pte) argument
2871 do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) argument
3088 do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags) argument
3164 __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte) argument
3349 do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) argument
3369 do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) argument
3405 handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags) argument
3461 handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
3530 __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) argument
3553 __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) argument
3626 get_gate_vma(struct mm_struct *mm) argument
3646 __follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp) argument
3684 follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp) argument
3779 __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
3847 access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
3861 struct mm_struct *mm; local
3879 struct mm_struct *mm = current->mm; local
[all...]
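memory.c's helpers repeat one shape throughout: descend pgd -> pud -> pmd, allocating levels on the way down for writers (__pud_alloc/__pmd_alloc) and bailing out for readers (__follow_pte). A hedged sketch of the read-side walk, folding together the calls visible above under the 3.x-era four-level API (walk_to_pte is a hypothetical name):

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Descend to the pte for `addr`, returning it mapped and locked, or
     * NULL if any intermediate level is absent. On success the caller
     * must pte_unmap_unlock() when done. */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr,
                              spinlock_t **ptlp)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none_or_clear_bad(pgd))
                    return NULL;
            pud = pud_offset(pgd, addr);
            if (pud_none_or_clear_bad(pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (pmd_none_or_clear_bad(pmd))
                    return NULL;
            return pte_offset_map_lock(mm, pmd, addr, ptlp);
    }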
mempolicy.c
69 #include <linux/mm.h>
427 * Rebind each vma in mm to new nodemask.
429 * Call holding a reference to mm. Takes mm->mmap_sem during call.
432 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) argument
436 down_write(&mm->mmap_sem);
437 for (vma = mm->mmap; vma; vma = vma->vm_next)
439 up_write(&mm->mmap_sem);
570 check_range(struct mm_struct *mm, unsigned long start, unsigned long end, argument
577 first = find_vma(mm, star
632 mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) argument
722 struct mm_struct *mm = current->mm; local
795 lookup_node(struct mm_struct *mm, unsigned long addr) argument
813 struct mm_struct *mm = current->mm; local
927 migrate_to_node(struct mm_struct *mm, int source, int dest, int flags) argument
959 do_migrate_pages(struct mm_struct *mm, const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) argument
1074 do_migrate_pages(struct mm_struct *mm, const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) argument
1091 struct mm_struct *mm = current->mm; local
1295 struct mm_struct *mm = NULL; local
[all...]
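mpol_rebind_mm() (hit at line 432) walks every VMA under a write-held mmap_sem to swap in a new nodemask; mbind() is the user-facing way to attach a policy to a range in the first place (again via libnuma's <numaif.h>; node 0 is arbitrary):

    #include <numaif.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4096;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;
            unsigned long nodemask = 1UL << 0;      /* only node 0 */
            if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0))
                    perror("mbind");
            return 0;
    }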
mmap.c
2 * mm/mmap.c
11 #include <linux/mm.h>
49 static void unmap_region(struct mm_struct *mm,
112 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) argument
177 if (mm)
178 allowed -= mm->total_vm / 32;
247 struct mm_struct *mm = current->mm; local
250 down_write(&mm->mmap_sem);
255 * randomize_va_space to 2, which will still cause mm
336 validate_mm(struct mm_struct *mm) argument
357 find_vma_prepare(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev, struct rb_node ***rb_link, struct rb_node ** rb_parent) argument
393 __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, struct rb_node **rb_link, struct rb_node *rb_parent) argument
423 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) argument
431 vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) argument
458 __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) argument
470 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev) argument
493 struct mm_struct *mm = vma->vm_mm; local
750 vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy) argument
921 vm_stat_account(struct mm_struct *mm, unsigned long flags, struct file *file, long pages) argument
946 struct mm_struct * mm = current->mm; local
1197 struct mm_struct *mm = current->mm; local
1367 struct mm_struct *mm = current->mm; local
1421 arch_unmap_area(struct mm_struct *mm, unsigned long addr) argument
1443 struct mm_struct *mm = current->mm; local
1523 arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) argument
1569 find_vma(struct mm_struct *mm, unsigned long addr) argument
1611 find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) argument
1628 struct mm_struct *mm = vma->vm_mm; local
1785 find_extend_vma(struct mm_struct *mm, unsigned long addr) argument
1807 find_extend_vma(struct mm_struct * mm, unsigned long addr) argument
1836 remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) argument
1855 unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) argument
1878 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) argument
1909 __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long addr, int new_below) argument
1986 split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
2000 do_munmap(struct mm_struct *mm, unsigned long start, size_t len) argument
2087 struct mm_struct *mm = current->mm; local
2097 verify_mm_writelocked(struct mm_struct *mm) argument
2114 struct mm_struct * mm = current->mm; local
2211 exit_mmap(struct mm_struct *mm) argument
2261 insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) argument
2301 struct mm_struct *mm = vma->vm_mm; local
2378 may_expand_vm(struct mm_struct *mm, unsigned long npages) argument
2439 install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) argument
2482 vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) argument
2505 vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) argument
2555 mm_take_all_locks(struct mm_struct *mm) argument
2626 mm_drop_all_locks(struct mm_struct *mm) argument
[all...]
nommu.c
2 * linux/mm/nommu.c
4 * Replacement code for mm functions to support CPU's that don't
17 #include <linux/mm.h>
115 vma = find_vma(current->mm, (unsigned long)objp);
127 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, argument
145 vma = find_vma(mm, start);
177 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, argument
188 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
242 down_write(&current->mm->mmap_sem);
243 vma = find_vma(current->mm, (unsigne
486 struct mm_struct *mm = current->mm; local
664 struct mm_struct *mm = vma->vm_mm; local
680 add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) argument
751 struct mm_struct *mm = vma->vm_mm; local
787 delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) argument
805 find_vma(struct mm_struct *mm, unsigned long addr) argument
833 find_extend_vma(struct mm_struct *mm, unsigned long addr) argument
851 find_vma_exact(struct mm_struct *mm, unsigned long addr, unsigned long len) argument
1529 split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
1595 shrink_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long from, unsigned long to) argument
1636 do_munmap(struct mm_struct *mm, unsigned long start, size_t len) argument
1715 struct mm_struct *mm = current->mm; local
1726 exit_mmap(struct mm_struct *mm) argument
1849 arch_unmap_area(struct mm_struct *mm, unsigned long addr) argument
1876 __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) argument
1964 __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
2006 access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
2018 struct mm_struct *mm; local
[all...]
swapfile.c
2 * linux/mm/swapfile.c
8 #include <linux/mm.h>
996 static int unuse_mm(struct mm_struct *mm, argument
1002 if (!down_read_trylock(&mm->mmap_sem)) {
1009 down_read(&mm->mmap_sem);
1012 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1016 up_read(&mm->mmap_sem);
1076 * start at the first mm we freed the previous entry from
1080 * A simpler strategy would be to start at the last mm we
1085 * duplicated after we scanned child: using last mm woul
1165 struct mm_struct *mm; local
[all...]
hugetlb.c
8 #include <linux/mm.h>
65 * down_write(&mm->mmap_sem);
67 * down_read(&mm->mmap_sem);
2214 struct mm_struct *mm = vma->vm_mm; local
2234 mmu_notifier_invalidate_range_start(mm, start, end);
2235 spin_lock(&mm->page_table_lock);
2237 ptep = huge_pte_offset(mm, address);
2241 if (huge_pmd_unshare(mm, &address, ptep))
2265 pte = huge_ptep_get_and_clear(mm, address, ptep);
2280 spin_unlock(&mm
2304 unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address) argument
2355 hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t pte, struct page *pagecache_page) argument
2506 hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int flags) argument
2634 hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
2743 follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int write) argument
2750 follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, int *length, int i, unsigned int flags) argument
2836 struct mm_struct *mm = vma->vm_mm; local
[all...]
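hugetlb_fault()/hugetlb_no_page() above handle faults on huge mappings under mm->page_table_lock. Such a mapping can be requested with MAP_HUGETLB (Linux >= 2.6.32; requires reserved hugepages, see /proc/sys/vm/nr_hugepages):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;   /* one 2 MiB hugepage */
            char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
            if (p == MAP_FAILED) {
                    perror("mmap(MAP_HUGETLB)"); /* often: none reserved */
                    return 1;
            }
            p[0] = 1;       /* first touch goes through hugetlb_fault() */
            munmap(p, len);
            return 0;
    }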
memcontrol.c
27 #include <linux/mm.h>
811 * mm_update_next_owner() may clear mm->owner to NULL
822 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) argument
826 if (!mm)
829 * Because we have no locks, mm->owner's may be being moved to other
835 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
959 void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) argument
963 if (!mm)
967 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1131 curr = try_get_mem_cgroup_from_mm(p->mm);
2209 __mem_cgroup_try_charge(struct mm_struct *mm, gfp_t gfp_mask, unsigned int nr_pages, struct mem_cgroup **ptr, bool oom) argument
2623 mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, gfp_t gfp_mask, enum charge_type ctype) argument
2650 mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) argument
2695 mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) argument
2728 mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page, gfp_t mask, struct mem_cgroup **memcgp) argument
5209 mem_cgroup_count_precharge(struct mm_struct *mm) argument
5234 mem_cgroup_precharge_mc(struct mm_struct *mm) argument
5312 struct mm_struct *mm; local
5421 mem_cgroup_move_charge(struct mm_struct *mm) argument
5465 struct mm_struct *mm = get_task_mm(p); local
[all...]

Completed in 773 milliseconds