Searched refs:end (Results 1 - 25 of 41) sorted by relevance


/mm/
madvise.c
47 unsigned long start, unsigned long end, int behavior)
86 error = ksm_madvise(vma, start, end, behavior, &new_flags);
104 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
120 if (end != vma->vm_end) {
121 error = split_vma(mm, vma, end, 0);
140 unsigned long end, struct mm_walk *walk)
149 for (index = start; index != end; index += PAGE_SIZE) {
175 unsigned long start, unsigned long end)
183 walk_page_range(start, end, &walk);
189 unsigned long start, unsigned long end,
45 madvise_behavior(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) argument
139 swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk) argument
174 force_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
188 force_shm_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) argument
219 madvise_willneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
274 madvise_dontneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
297 madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
342 madvise_hwpoison(int bhv, unsigned long start, unsigned long end) argument
377 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) argument
465 unsigned long end, tmp; local
[all...]
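
The madvise.c hits above are the kernel side of the madvise(2) system call: madvise_behavior(), madvise_willneed(), madvise_dontneed() and friends all act on a [start, end) range of a VMA. Below is a minimal user-space sketch of the call these paths service; the mapping size and the choice of advice values are illustrative only.

/* Minimal madvise(2) demo: advise the kernel about a private mapping. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t len = 16 * 4096;   /* arbitrary example size */
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); exit(1); }

    memset(p, 0xaa, len);     /* fault the pages in */

    /* MADV_WILLNEED -> madvise_willneed(); MADV_DONTNEED -> madvise_dontneed() */
    if (madvise(p, len, MADV_WILLNEED) != 0)
        perror("madvise(MADV_WILLNEED)");
    if (madvise(p, len, MADV_DONTNEED) != 0)
        perror("madvise(MADV_DONTNEED)");

    munmap(p, len);
    return 0;
}
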
debug-pagealloc.c
52 unsigned char *end; local
58 for (end = mem + bytes - 1; end > start; end--) {
59 if (*end != PAGE_POISON)
65 else if (start == end && single_bit_flip(*start, PAGE_POISON))
71 end - start + 1, 1);
mincore.c
23 unsigned long addr, unsigned long end,
44 if (addr == end)
98 unsigned long addr, unsigned long end,
101 unsigned long nr = (end - addr) >> PAGE_SHIFT;
117 unsigned long addr, unsigned long end,
155 } while (ptep++, addr = next, addr != end);
160 unsigned long addr, unsigned long end,
168 next = pmd_addr_end(addr, end);
181 } while (pmd++, addr = next, addr != end);
185 unsigned long addr, unsigned long end,
22 mincore_hugetlb_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec) argument
97 mincore_unmapped_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec) argument
116 mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec) argument
159 mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned char *vec) argument
184 mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned char *vec) argument
202 mincore_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec) argument
228 unsigned long end; local
[all...]
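
mincore.c implements mincore(2): the mincore_pte_range()/pmd_range()/pud_range() hits walk the page tables over [addr, end) and fill one status byte per page. A user-space sketch, assuming a small anonymous mapping:

/* mincore(2) demo: ask which pages of a mapping are resident in RAM. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t npages = 8, len = npages * page;   /* example size */
    unsigned char vec[8];                     /* one status byte per page */

    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); exit(1); }

    p[0] = 1;    /* touch only the first page */

    if (mincore(p, len, vec) != 0) { perror("mincore"); exit(1); }

    for (size_t i = 0; i < npages; i++)
        printf("page %zu: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");

    munmap(p, len);
    return 0;
}
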
pagewalk.c
6 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, argument
18 if (addr == end)
27 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, argument
37 next = pmd_addr_end(addr, end);
67 } while (pmd++, addr = next, addr != end);
72 static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, argument
81 next = pud_addr_end(addr, end);
95 } while (pud++, addr = next, addr != end);
102 unsigned long end)
105 return boundary < end
101 hugetlb_entry_end(struct hstate *h, unsigned long addr, unsigned long end) argument
108 walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
131 walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
167 walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk) argument
[all...]
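
pagewalk.c provides the generic walk_page_range() used elsewhere in this result set (for example the force_swapin_readahead() hit in madvise.c calls walk_page_range(start, end, &walk)). The fragment below is a hedged sketch of how a caller in this 3.x-era tree wires it up; the callback and counter names are hypothetical, and it assumes the struct mm_walk fields (.pmd_entry, .mm, .private) implied by the hits above.

/* Hypothetical kernel fragment (needs <linux/mm.h>): count pmd callbacks
 * over [start, end) with walk_page_range(). Assumes the 3.x-era API
 * walk_page_range(addr, end, &walk). */
static int count_pmd_entry(pmd_t *pmd, unsigned long addr,
                           unsigned long end, struct mm_walk *walk)
{
    unsigned long *count = walk->private;

    (*count)++;    /* called for each pmd covering the range */
    return 0;      /* non-zero would abort the walk */
}

static unsigned long count_pmds(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
    unsigned long count = 0;
    struct mm_walk walk = {
        .pmd_entry = count_pmd_entry,
        .mm        = vma->vm_mm,
        .private   = &count,
    };

    walk_page_range(start, end, &walk);
    return count;
}
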
mprotect.c
61 unsigned long addr, unsigned long end, pgprot_t newprot,
126 } while (pte++, addr += PAGE_SIZE, addr != end);
134 pud_t *pud, unsigned long addr, unsigned long end,
148 next = pmd_addr_end(addr, end);
155 mmu_notifier_invalidate_range_start(mm, mni_start, end);
180 } while (pmd++, addr = next, addr != end);
183 mmu_notifier_invalidate_range_end(mm, mni_start, end);
191 pgd_t *pgd, unsigned long addr, unsigned long end,
200 next = pud_addr_end(addr, end);
205 } while (pud++, addr = next, addr != end);
60 change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
133 change_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
190 change_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
210 change_protection_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
240 change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
255 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags) argument
338 unsigned long vm_flags, nstart, end, tmp, reqprot; local
[all...]
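
mprotect.c is the kernel side of mprotect(2); change_protection_range() above walks [addr, end) rewriting PTE protections level by level. A user-space sketch:

/* mprotect(2) demo: drop write permission on one page, then restore it. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); exit(1); }

    p[0] = 'x';                                      /* writable here */

    if (mprotect(p, page, PROT_READ) != 0)           /* now read-only */
        perror("mprotect(PROT_READ)");
    /* writing to p[0] here would raise SIGSEGV */

    if (mprotect(p, page, PROT_READ | PROT_WRITE) != 0)
        perror("mprotect(PROT_READ|PROT_WRITE)");
    p[0] = 'y';                                      /* writable again */

    munmap(p, page);
    return 0;
}
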
msync.c
33 unsigned long end; local
47 end = start + len;
48 if (end < start)
51 if (end == start)
54 * If the interval [start,end) covers some unmapped address ranges,
55 * just ignore them, but return -ENOMEM at the end.
63 /* Still start < end. */
70 if (start >= end)
83 fend = fstart + (min(end, vma->vm_end) - start) - 1;
94 if (error || start >= end)
[all...]
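
msync.c implements msync(2); the hits show the [start, end) bounds checks and the per-VMA loop. A sketch that flushes a shared file mapping; the file path is illustrative only.

/* msync(2) demo: write through a shared file mapping and flush it to disk. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t len = 4096;
    int fd = open("/tmp/msync-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
    if (fd < 0 || ftruncate(fd, len) != 0) { perror("open/ftruncate"); exit(1); }

    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) { perror("mmap"); exit(1); }

    strcpy(p, "hello via mmap");

    /* Synchronously flush the dirty range [p, p + len) to the file. */
    if (msync(p, len, MS_SYNC) != 0)
        perror("msync");

    munmap(p, len);
    close(fd);
    return 0;
}
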
mlock.c
212 * @end: end address
228 unsigned long start, unsigned long end, int *nonblocking)
231 unsigned long nr_pages = (end - start) / PAGE_SIZE;
235 VM_BUG_ON(end & ~PAGE_MASK);
237 VM_BUG_ON_VMA(end > vma->vm_end, vma);
417 unsigned long end)
429 end = pgd_addr_end(start, end);
430 end
227 __mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) argument
415 __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end) argument
478 munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
554 mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) argument
615 unsigned long nstart, end, tmp; local
674 unsigned long end, nstart, nend; local
[all...]
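
mlock.c backs mlock(2)/munlock(2); __mlock_vma_pages_range() and munlock_vma_pages_range() above pin and unpin the pages of a [start, end) range. Sketch:

/* mlock(2)/munlock(2) demo: pin one page in RAM, then release it.
 * May fail with EPERM/ENOMEM if RLIMIT_MEMLOCK is too small. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); exit(1); }

    if (mlock(p, page) != 0)            /* fault in and pin the range */
        perror("mlock");
    else if (munlock(p, page) != 0)     /* undo the pin */
        perror("munlock");

    munmap(p, page);
    return 0;
}
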
bootmem.c
96 unsigned long mapstart, unsigned long start, unsigned long end)
100 mminit_validate_memmodel_limits(&start, &end);
103 bdata->node_low_pfn = end;
110 mapsize = bootmap_bytes(end - start);
113 bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
114 bdata - bootmem_node_data, start, mapstart, end, mapsize);
159 unsigned long cursor, end; local
164 end = PFN_DOWN(physaddr + size);
166 for (; cursor < end; cursor++) {
175 unsigned long *map, start, end, page local
95 init_bootmem_core(bootmem_data_t *bdata, unsigned long mapstart, unsigned long start, unsigned long end) argument
328 mark_bootmem_node(bootmem_data_t *bdata, unsigned long start, unsigned long end, int reserve, int flags) argument
350 mark_bootmem(unsigned long start, unsigned long end, int reserve, int flags) argument
395 unsigned long start, end; local
416 unsigned long start, end; local
440 unsigned long start, end; local
461 unsigned long start, end; local
[all...]
truncate.c
220 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
247 pgoff_t end; /* exclusive */ local
264 * 'start' and 'end' always covers the range of pages to be fully
266 * start of the range and 'partial_end' at the end of the range.
267 * Note that 'end' is exclusive while 'lend' is inclusive.
272 * lend == -1 indicates end-of-file so we have to set 'end'
276 end = -1;
278 end = (lend + 1) >> PAGE_CACHE_SHIFT;
282 while (index < end
479 invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end) argument
582 invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end) argument
[all...]
nobootmem.c
72 unsigned long cursor, end; local
77 end = PFN_DOWN(addr + size);
79 for (; cursor < end; cursor++) {
85 static void __init __free_pages_memory(unsigned long start, unsigned long end) argument
89 while (start < end) {
92 while (start + (1UL << order) > end)
102 phys_addr_t end)
106 PFN_DOWN(end), max_low_pfn);
119 phys_addr_t start, end; local
124 for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NUL
101 __free_memory_core(phys_addr_t start, phys_addr_t end) argument
[all...]
vmalloc.c
58 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) argument
66 } while (pte++, addr += PAGE_SIZE, addr != end);
69 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) argument
76 next = pmd_addr_end(addr, end);
80 } while (pmd++, addr = next, addr != end);
83 static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) argument
90 next = pud_addr_end(addr, end);
94 } while (pud++, addr = next, addr != end);
97 static void vunmap_page_range(unsigned long addr, unsigned long end) argument
102 BUG_ON(addr >= end);
112 vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
138 vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
155 vmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
178 vmap_page_range_noflush(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) argument
199 vmap_page_range(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) argument
526 vmap_debug_free_range(unsigned long start, unsigned long end) argument
596 __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, int sync, int force_flush) argument
655 unsigned long start = ULONG_MAX, end = 0; local
665 unsigned long start = ULONG_MAX, end = 0; local
1014 unsigned long start = ULONG_MAX, end = 0; local
1265 unsigned long end = addr + size; local
1276 unsigned long end = addr + get_vm_area_size(area); local
1309 __get_vm_area_node(unsigned long size, unsigned long align, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, const void *caller) argument
1344 __get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end) argument
1352 __get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, const void *caller) argument
1632 __vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, int node, const void *caller) argument
2271 pvm_find_next_prev(unsigned long end, struct vmap_area **pnext, struct vmap_area **pprev) argument
2370 unsigned long base, start, end, last_end; local
[all...]
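
vmalloc.c maps and unmaps virtually contiguous kernel ranges; the vmap_page_range()/vunmap_page_range() hits walk [addr, end) at each page-table level. A hedged kernel-module-style sketch of the usual entry points, vmalloc()/vfree(); the buffer size and names are arbitrary, and extra headers are elided.

/* Kernel fragment (needs <linux/vmalloc.h>): allocate and free a
 * virtually contiguous buffer. Illustrative only. */
#include <linux/vmalloc.h>

static void *demo_buf;

static int demo_alloc(void)
{
    demo_buf = vmalloc(64 * 1024);    /* 64 KiB, not physically contiguous */
    if (!demo_buf)
        return -ENOMEM;
    memset(demo_buf, 0, 64 * 1024);
    return 0;
}

static void demo_free(void)
{
    vfree(demo_buf);    /* tears the mapping down via vunmap_page_range() */
    demo_buf = NULL;
}
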
memblock.c
106 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
117 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, argument
124 this_start = clamp(this_start, start, end);
125 this_end = clamp(this_end, start, end);
138 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
149 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, argument
156 this_start = clamp(this_start, start, end);
157 this_end = clamp(this_end, start, end);
191 memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid) argument
251 memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align) argument
504 phys_addr_t end = base + memblock_cap_size(base, &size); local
609 phys_addr_t end = base + memblock_cap_size(base, &size); local
1040 memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid) argument
1061 memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end) argument
1298 u64 cursor, end; local
1434 phys_addr_t end = base + memblock_cap_size(base, &size); local
1461 phys_addr_t start, end, orig_start, orig_end; local
[all...]
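
memblock.c is the early-boot physical range allocator; the hits show the bottom-up/top-down candidate search over [start, end). A hedged sketch of the typical find-then-reserve pattern, using the memblock_find_in_range()/memblock_reserve() signatures shown above; the 1 MiB size and function name are illustrative.

/* Early-boot kernel fragment (needs <linux/memblock.h>): carve out 1 MiB
 * of physical memory. Purely illustrative. */
#include <linux/memblock.h>
#include <linux/sizes.h>

static phys_addr_t __init demo_reserve_1mb(void)
{
    phys_addr_t base;

    /* start, end, size, align; ACCESSIBLE limits the search to mapped memory */
    base = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, SZ_1M, SZ_1M);
    if (!base)
        return 0;    /* nothing suitable found */

    memblock_reserve(base, SZ_1M);    /* mark [base, base + 1 MiB) reserved */
    return base;
}
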
sparse-vmemmap.c
91 unsigned long start, unsigned long end)
98 "page_structs\n", start, end - 1);
152 unsigned long end, int node)
160 for (; addr < end; addr += PAGE_SIZE) {
182 unsigned long end; local
187 end = (unsigned long)(map + PAGES_PER_SECTION);
189 if (vmemmap_populate(start, end, nid))
90 vmemmap_verify(pte_t *pte, int node, unsigned long start, unsigned long end) argument
151 vmemmap_populate_basepages(unsigned long start, unsigned long end, int node) argument
gup.c
723 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, argument
763 } while (ptep++, addr += PAGE_SIZE, addr != end);
782 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, argument
790 unsigned long end, int write, struct page **pages, int *nr)
808 } while (addr += PAGE_SIZE, addr != end);
837 unsigned long end, int write, struct page **pages, int *nr)
855 } while (addr += PAGE_SIZE, addr != end);
878 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, argument
888 next = pmd_addr_end(addr, end);
907 } while (pmdp++, addr = next, addr != end);
789 gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
836 gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
912 gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
944 unsigned long addr, len, end; local
[all...]
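
The gup.c hits (gup_pte_range()/gup_pmd_range()/gup_pud_range()) are the lockless fast path behind get_user_pages_fast(). A hedged kernel sketch of a caller pinning a user range, assuming the era's get_user_pages_fast(start, nr_pages, write, pages) signature; the function name is hypothetical.

/* Kernel fragment (needs <linux/mm.h>, <linux/slab.h>): pin the user pages
 * backing [uaddr, uaddr + nr * PAGE_SIZE) for writing, then release them. */
#include <linux/mm.h>
#include <linux/slab.h>

static int demo_pin_user_range(unsigned long uaddr, int nr)
{
    struct page **pages;
    int i, got;

    pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
    if (!pages)
        return -ENOMEM;

    got = get_user_pages_fast(uaddr, nr, 1 /* write */, pages);

    /* ... use the pinned pages here ... */

    for (i = 0; i < got; i++)
        put_page(pages[i]);    /* drop the references taken by GUP */
    kfree(pages);

    return got == nr ? 0 : -EFAULT;
}
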
vmacache.c
112 unsigned long end)
124 if (vma && vma->vm_start == start && vma->vm_end == end) {
110 vmacache_find_exact(struct mm_struct *mm, unsigned long start, unsigned long end) argument
mmu_notifier.c
111 unsigned long end)
119 young |= mn->ops->clear_flush_young(mn, mm, start, end);
174 unsigned long start, unsigned long end)
182 mn->ops->invalidate_range_start(mn, mm, start, end);
189 unsigned long start, unsigned long end)
197 mn->ops->invalidate_range_end(mn, mm, start, end);
109 __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long start, unsigned long end) argument
173 __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end) argument
188 __mmu_notifier_invalidate_range_end(struct mm_struct *mm, unsigned long start, unsigned long end) argument
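
mmu_notifier.c fans each event out to registered notifiers; the hits show the per-notifier callbacks invoked as mn->ops->invalidate_range_start(mn, mm, start, end) and so on. A hedged sketch of an ops table with callbacks matching those signatures; the handler names and bodies are placeholders.

/* Kernel fragment (needs <linux/mmu_notifier.h>): a minimal notifier that
 * just logs the ranges it is told about. Hypothetical handlers. */
#include <linux/mmu_notifier.h>

static void demo_invalidate_range_start(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
    pr_debug("invalidate start [%lx, %lx)\n", start, end);
}

static void demo_invalidate_range_end(struct mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
    pr_debug("invalidate end   [%lx, %lx)\n", start, end);
}

static const struct mmu_notifier_ops demo_mn_ops = {
    .invalidate_range_start = demo_invalidate_range_start,
    .invalidate_range_end   = demo_invalidate_range_end,
};
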
memory.c
89 * that high_memory defines the upper bound on direct map memory, then end
216 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) argument
221 tlb->fullmm = !(start | (end+1));
224 tlb->end = end;
266 * Called at the end of the shootdown operation to free up any resources
269 void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) argument
402 unsigned long addr, unsigned long end,
412 next = pmd_addr_end(addr, end);
416 } while (pmd++, addr = next, addr != end);
401 free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
434 free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
470 free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
888 copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
951 copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
984 copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
1012 unsigned long end = vma->vm_end; local
1075 zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) argument
1222 zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) argument
1266 zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) argument
1285 unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) argument
1315 unsigned long end; local
1396 unsigned long end = start + size; local
1422 unsigned long end = address + size; local
1659 remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
1680 remap_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
1701 remap_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
1736 unsigned long end = addr + PAGE_ALIGN(size); local
1836 apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
1870 apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
1892 apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
1921 unsigned long end = addr + size; local
[all...]
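
Among the range helpers listed for memory.c is the remap_pte_range()/remap_pmd_range()/remap_pud_range() chain behind remap_pfn_range(), which drivers call from their ->mmap handler. A hedged sketch of such a handler; demo_buf_phys and the handler name are hypothetical.

/* Kernel fragment (needs <linux/fs.h>, <linux/mm.h>): map a physically
 * contiguous buffer into user space from a character device's mmap handler. */
#include <linux/fs.h>
#include <linux/mm.h>

static phys_addr_t demo_buf_phys;    /* set elsewhere by the driver */

static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
    unsigned long size = vma->vm_end - vma->vm_start;    /* the [start, end) the user asked for */

    return remap_pfn_range(vma, vma->vm_start,
                           demo_buf_phys >> PAGE_SHIFT,
                           size, vma->vm_page_prot);
}
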
mempolicy.c
485 unsigned long addr, unsigned long end,
517 } while (pte++, addr += PAGE_SIZE, addr != end);
519 return addr != end;
552 unsigned long addr, unsigned long end,
561 next = pmd_addr_end(addr, end);
575 } while (pmd++, addr = next, addr != end);
580 unsigned long addr, unsigned long end,
589 next = pud_addr_end(addr, end);
597 } while (pud++, addr = next, addr != end);
602 unsigned long addr, unsigned long end,
484 queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
551 queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
579 queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
601 queue_pages_pgd_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
631 change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
643 change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
658 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
742 mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) argument
1195 unsigned long end; local
2179 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) argument
2351 sp_node_init(struct sp_node *node, unsigned long start, unsigned long end, struct mempolicy *pol) argument
2359 sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) argument
2381 shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) argument
[all...]
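
The mempolicy.c queue_pages_*_range() hits walk [start, end) to apply NUMA policy, and mbind_range() is the kernel side of mbind(2). A user-space sketch using libnuma's <numaif.h> (link with -lnuma); binding to node 0 is an arbitrary choice.

/* mbind(2) demo: bind a mapping's future page allocations to NUMA node 0. */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096 * 4;
    unsigned long nodemask = 1UL << 0;    /* node 0 only */

    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); exit(1); }

    /* Restrict allocations for [p, p + len) to node 0. */
    if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0) != 0)
        perror("mbind");

    p[0] = 1;    /* allocation now honours the policy */
    munmap(p, len);
    return 0;
}
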
mmap.c
62 unsigned long start, unsigned long end);
553 unsigned long end, struct vm_area_struct **pprev,
569 if (vma_tmp->vm_start < end)
587 unsigned long addr, unsigned long end)
593 vma = find_vma_intersection(mm, addr, end);
597 nr_pages = (min(end, vma->vm_end) -
604 if (vma->vm_start > end)
607 overlap_len = min(end, vma->vm_end) - vma->vm_start;
729 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
745 if (end >
552 find_vma_links(struct mm_struct *mm, unsigned long addr, unsigned long end, struct vm_area_struct **pprev, struct rb_node ***rb_link, struct rb_node **rb_parent) argument
586 count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) argument
728 vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) argument
1042 vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, const char __user *anon_name) argument
2401 unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) argument
2422 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) argument
2536 unsigned long end; local
[all...]
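
mmap.c provides the VMA bookkeeping used throughout this result set (find_vma_links(), vma_merge(), vma_adjust(), unmap_region(), all keyed on [start, end)). The user-visible entry points are mmap(2)/munmap(2):

/* mmap(2)/munmap(2) demo: create and destroy an anonymous mapping,
 * the operations that ultimately drive vma_merge()/unmap_region(). */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 1 << 20;    /* 1 MiB, arbitrary */

    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); exit(1); }

    printf("mapped [%p, %p)\n", p, (char *)p + len);

    if (munmap(p, len) != 0)    /* unmaps the whole [p, p + len) range */
        perror("munmap");
    return 0;
}
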
page_cgroup.c
196 unsigned long start, end, pfn; local
200 end = SECTION_ALIGN_UP(start_pfn + nr_pages);
212 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
221 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
230 unsigned long start, end, pfn; local
233 end = SECTION_ALIGN_UP(start_pfn + nr_pages);
235 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
sparse.c
170 void __init memory_present(int nid, unsigned long start, unsigned long end) argument
175 mminit_validate_memmodel_limits(&start, &end);
176 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
606 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); local
608 vmemmap_free(start, end);
614 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); local
616 vmemmap_free(start, end);
percpu.c
251 int *rs, int *re, int end)
253 *rs = find_next_zero_bit(chunk->populated, end, *rs);
254 *re = find_next_bit(chunk->populated, end, *rs + 1);
258 int *rs, int *re, int end)
260 *rs = find_next_bit(chunk->populated, end, *rs);
261 *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
266 * page regions between @start and @end in @chunk. @rs and @re should
267 * be integer variables and will be set to start and end page index of
270 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
271 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
250 pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end) argument
257 pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end) argument
332 int end = chunk->map[i + 1] & ~1; local
[all...]
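
percpu.c manages the per-CPU chunks; the pcpu_next_unpop()/pcpu_next_pop() hits scan populated-page bitmaps over [start, end) page indices. A hedged sketch of the allocator's usual consumer interface, alloc_percpu()/per_cpu_ptr(); the names are hypothetical.

/* Kernel fragment (needs <linux/percpu.h>): one counter per CPU. */
#include <linux/percpu.h>

static unsigned long __percpu *demo_counters;

static int demo_percpu_init(void)
{
    int cpu;

    demo_counters = alloc_percpu(unsigned long);
    if (!demo_counters)
        return -ENOMEM;

    for_each_possible_cpu(cpu)
        *per_cpu_ptr(demo_counters, cpu) = 0;    /* explicit zeroing, for clarity */
    return 0;
}

static void demo_percpu_exit(void)
{
    free_percpu(demo_counters);
}
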
internal.h
151 * at the end of a zone and migrate_pfn begins at the start. Movable pages
152 * are moved to the end of a zone during a compaction run and the run
225 unsigned long start, unsigned long end, int *nonblocking);
227 unsigned long start, unsigned long end);
page-writeback.c
1006 * So we end up using (2) to always keep
1787 * @end: ending page index (inclusive)
1789 * This function scans the page range from @start to @end (inclusive) and tags
1801 pgoff_t start, pgoff_t end)
1809 &start, end, WRITEBACK_TAG_BATCH,
1814 /* We check 'start' to handle wrapping when end == ~0UL */
1851 pgoff_t end; /* Inclusive */ local
1865 end = -1;
1868 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1879 tag_pages_for_writeback(mapping, index, end);
1800 tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end) argument
[all...]
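
The page-writeback.c hits (tag_pages_for_writeback() and the write_cache_pages() loop) operate on an inclusive [start, end] page-index range of a file. From user space, a range writeback of this kind can be requested with sync_file_range(2); the path and offsets below are arbitrary.

/* sync_file_range(2) demo: force writeback of the first 64 KiB of a file.
 * Linux-specific; the file path is illustrative. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/tmp/writeback-demo", O_RDWR | O_CREAT, 0600);
    if (fd < 0) { perror("open"); exit(1); }

    /* Kick off (and wait for) writeback of bytes [0, 64 KiB). */
    if (sync_file_range(fd, 0, 64 * 1024,
                        SYNC_FILE_RANGE_WAIT_BEFORE |
                        SYNC_FILE_RANGE_WRITE |
                        SYNC_FILE_RANGE_WAIT_AFTER) != 0)
        perror("sync_file_range");

    close(fd);
    return 0;
}
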
page_alloc.c
2261 * end up not killing anything but false positives are acceptable.
3036 unsigned long end = addr + PAGE_ALIGN(size); local
3038 while (addr < end) {
4016 * Get the start pfn, end pfn and the number of blocks to reserve
4199 * batches of pages, one task can end up with a lot
4508 * get_pfn_range_for_nid - Return the start and end page frames for a node
4513 * It returns the start and end page frame of a node based on information
4515 * with no available memory, a warning is printed and the start and end
4563 * provided by the architecture for a given node by using the end of the
4605 /* Get the start and end o
4939 unsigned long size, start, end; local
5042 unsigned long start, end, mask; local
5445 free_reserved_area(void *start, void *end, int poison, char *s) argument
6276 __alloc_contig_migrate_range(struct compact_control *cc, unsigned long start, unsigned long end) argument
6340 alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype) argument
[all...]
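
page_alloc.c is the buddy allocator; the hits include alloc_contig_range() and free_reserved_area(), both over pfn/address ranges. A hedged sketch of the basic page-level interface most kernel code uses, alloc_pages()/__free_pages(); the order and function name are illustrative, and extra headers are elided.

/* Kernel fragment (needs <linux/gfp.h>, <linux/mm.h>): grab 4 contiguous
 * pages (order 2) from the buddy allocator and give them back. */
#include <linux/gfp.h>
#include <linux/mm.h>

static int demo_order2_alloc(void)
{
    struct page *page = alloc_pages(GFP_KERNEL, 2);    /* 2^2 pages */
    void *va;

    if (!page)
        return -ENOMEM;

    va = page_address(page);    /* kernel virtual address of the block */
    memset(va, 0, 4 * PAGE_SIZE);

    __free_pages(page, 2);      /* must free with the same order */
    return 0;
}
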

Completed in 232 milliseconds
