/mm/: cross-reference hits for the identifier 'end'
msync.c
     33  unsigned long end;   (local)
     47  end = start + len;
     48  if (end < start)
     51  if (end == start)
     54  * If the interval [start,end) covers some unmapped address ranges,
     55  * just ignore them, but return -ENOMEM at the end.
     63  /* Still start < end. */
     70  if (start >= end)
     83  fend = fstart + (min(end, vma->vm_end) - start) - 1;
     94  if (error || start >= end)
    [all...]
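The hits at msync.c lines 47-48 show the standard unsigned overflow guard for a [start, start + len) interval: if the addition wraps, the resulting end compares below start. A minimal userspace sketch of the idiom, with illustrative names rather than the kernel's:

#include <stdio.h>

/* valid iff [start, start + len) does not wrap around the address space */
static int range_is_valid(unsigned long start, unsigned long len)
{
    unsigned long end = start + len;    /* may wrap on overflow */

    if (end < start)                    /* wrapped: reject, as msync() does */
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", range_is_valid(0x1000UL, 0x2000UL));   /* 1: fits */
    printf("%d\n", range_is_valid(~0UL - 10, 100));       /* 0: wraps */
    return 0;
}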
pagewalk.c
      6  static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,   (argument)
     18  if (addr == end)
     27  static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,   (argument)
     37  next = pmd_addr_end(addr, end);
     67  } while (pmd++, addr = next, addr != end);
     72  static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,   (argument)
     81  next = pud_addr_end(addr, end);
     95  } while (pud++, addr = next, addr != end);
    101  hugetlb_entry_end(struct hstate *h, unsigned long addr, unsigned long end)   (argument)
    105  return boundary < end
    108  walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk)   (argument)
    131  walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk)   (argument)
    167  walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk)   (argument)
    [all...]
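Every walker above steps the same way: compute the next page-table boundary with pXd_addr_end(), clamped to the caller's end, then advance with the comma-expression loop "} while (pmd++, addr = next, addr != end);". A simplified userspace sketch of that stepping logic follows; BOUNDARY_SIZE stands in for PMD_SIZE, and the kernel's wraparound-safe comparison is omitted, so treat it as an illustration only:

#include <stdio.h>

#define BOUNDARY_SIZE (1UL << 21)            /* one 2 MiB PMD's worth of VA */
#define BOUNDARY_MASK (~(BOUNDARY_SIZE - 1))

/* next boundary after addr, clamped to end (cf. pmd_addr_end()) */
static unsigned long addr_end(unsigned long addr, unsigned long end)
{
    unsigned long boundary = (addr + BOUNDARY_SIZE) & BOUNDARY_MASK;

    return boundary < end ? boundary : end;
}

int main(void)
{
    unsigned long addr = 0x1ff000, end = 0x600000, next;

    do {
        next = addr_end(addr, end);
        printf("chunk [%#lx, %#lx)\n", addr, next);  /* one table entry each */
    } while (addr = next, addr != end);
    return 0;
}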
vmacache.c
    110  vmacache_find_exact(struct mm_struct *mm, unsigned long start, unsigned long end)   (argument)
    124  if (vma && vma->vm_start == start && vma->vm_end == end) {
debug-pagealloc.c
     52  unsigned char *end;   (local)
     58  for (end = mem + bytes - 1; end > start; end--) {
     59  if (*end != PAGE_POISON)
     65  else if (start == end && single_bit_flip(*start, PAGE_POISON))
     71  end - start + 1, 1);
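Lines 58-65 bound a corrupted poison region by scanning backwards from the last byte, then classify a lone bad byte that differs from the pattern in exactly one bit as a likely hardware bit flip. A self-contained sketch of that check; POISON is an illustrative constant, not necessarily the kernel's PAGE_POISON value:

#include <stdio.h>

#define POISON 0xaa

/* true iff a and b differ in exactly one bit */
static int single_bit_flip(unsigned char a, unsigned char b)
{
    unsigned char diff = a ^ b;

    return diff && !(diff & (diff - 1));
}

static void check_poison(unsigned char *mem, int bytes)
{
    unsigned char *start, *end;

    for (start = mem; start < mem + bytes; start++)   /* first bad byte */
        if (*start != POISON)
            break;
    if (start == mem + bytes) {
        printf("region clean\n");
        return;
    }

    for (end = mem + bytes - 1; end > start; end--)   /* last bad byte */
        if (*end != POISON)
            break;

    if (start == end && single_bit_flip(*start, POISON))
        printf("single bit flip at offset %ld\n", (long)(start - mem));
    else
        printf("memory corruption: %ld byte(s)\n", (long)(end - start + 1));
}

int main(void)
{
    unsigned char buf[16];

    for (int i = 0; i < 16; i++)
        buf[i] = POISON;
    buf[5] ^= 0x08;           /* inject a one-bit error */
    check_poison(buf, 16);    /* reports a single bit flip */
    return 0;
}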
mincore.c
     22  mincore_hugetlb_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec)   (argument)
     44  if (addr == end)
     97  mincore_unmapped_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec)   (argument)
    101  unsigned long nr = (end - addr) >> PAGE_SHIFT;
    116  mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec)   (argument)
    155  } while (ptep++, addr = next, addr != end);
    159  mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned char *vec)   (argument)
    168  next = pmd_addr_end(addr, end);
    181  } while (pmd++, addr = next, addr != end);
    184  mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned char *vec)   (argument)
    202  mincore_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec)   (argument)
    228  unsigned long end;   (local)
    [all...]
mmu_notifier.c
    109  __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long start, unsigned long end)   (argument)
    119  young |= mn->ops->clear_flush_young(mn, mm, start, end);
    173  __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end)   (argument)
    182  mn->ops->invalidate_range_start(mn, mm, start, end);
    188  __mmu_notifier_invalidate_range_end(struct mm_struct *mm, unsigned long start, unsigned long end)   (argument)
    197  mn->ops->invalidate_range_end(mn, mm, start, end);
sparse-vmemmap.c
     90  vmemmap_verify(pte_t *pte, int node, unsigned long start, unsigned long end)   (argument)
     98  "page_structs\n", start, end - 1);
    151  vmemmap_populate_basepages(unsigned long start, unsigned long end, int node)   (argument)
    160  for (; addr < end; addr += PAGE_SIZE) {
    182  unsigned long end;   (local)
    187  end = (unsigned long)(map + PAGES_PER_SECTION);
    189  if (vmemmap_populate(start, end, nid))
gup.c
    723  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,   (argument)
    763  } while (ptep++, addr += PAGE_SIZE, addr != end);
    782  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,   (argument)
    789  gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    808  } while (addr += PAGE_SIZE, addr != end);
    836  gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    855  } while (addr += PAGE_SIZE, addr != end);
    878  static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,   (argument)
    888  next = pmd_addr_end(addr, end);
    907  } while (pmdp++, addr = next, addr != end);
    912  gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    944  unsigned long addr, len, end;   (local)
    [all...]
madvise.c
     45  madvise_behavior(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior)   (argument)
     86  error = ksm_madvise(vma, start, end, behavior, &new_flags);
    104  *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
    120  if (end != vma->vm_end) {
    121  error = split_vma(mm, vma, end, 0);
    139  swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk)   (argument)
    149  for (index = start; index != end; index += PAGE_SIZE) {
    174  force_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end)   (argument)
    183  walk_page_range(start, end, &walk);
    188  force_shm_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping)   (argument)
    219  madvise_willneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end)   (argument)
    274  madvise_dontneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end)   (argument)
    297  madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end)   (argument)
    342  madvise_hwpoison(int bhv, unsigned long start, unsigned long end)   (argument)
    377  madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior)   (argument)
    465  unsigned long end, tmp;   (local)
    [all...]
mprotect.c
     60  change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)   (argument)
    126  } while (pte++, addr += PAGE_SIZE, addr != end);
    133  change_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)   (argument)
    148  next = pmd_addr_end(addr, end);
    155  mmu_notifier_invalidate_range_start(mm, mni_start, end);
    180  } while (pmd++, addr = next, addr != end);
    183  mmu_notifier_invalidate_range_end(mm, mni_start, end);
    190  change_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)   (argument)
    200  next = pud_addr_end(addr, end);
    205  } while (pud++, addr = next, addr != end);
    210  change_protection_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)   (argument)
    240  change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)   (argument)
    255  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags)   (argument)
    338  unsigned long vm_flags, nstart, end, tmp, reqprot;   (local)
    [all...]
mremap.c
    458  unsigned long end = vma->vm_end + delta;   (local)
    459  if (end < vma->vm_end)   /* overflow */
    461  if (vma->vm_next && vma->vm_next->vm_start < end)   /* intersection */
    463  if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
    536  /* old_len exactly to the end of the area..
nobootmem.c
     72  unsigned long cursor, end;   (local)
     77  end = PFN_DOWN(addr + size);
     79  for (; cursor < end; cursor++) {
     85  static void __init __free_pages_memory(unsigned long start, unsigned long end)   (argument)
     89  while (start < end) {
     92  while (start + (1UL << order) > end)
    101  __free_memory_core(phys_addr_t start, phys_addr_t end)   (argument)
    106  PFN_DOWN(end), max_low_pfn);
    119  phys_addr_t start, end;   (local)
    124  for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NUL
    [all...]
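The loop at lines 89-92 of __free_pages_memory() repeatedly releases the largest power-of-two block that is both aligned to start and still ends at or before end, so a range is returned to the page allocator in as few calls as possible. A userspace sketch of the fitting logic; the MAX_ORDER value is an assumption, and lowest_set_order() stands in for the kernel's __ffs():

#include <stdio.h>

#define MAX_ORDER 11UL

/* index of the lowest set bit, like __ffs(); x must be nonzero */
static unsigned long lowest_set_order(unsigned long x)
{
    unsigned long order = 0;

    while (!(x & 1UL)) {
        x >>= 1;
        order++;
    }
    return order;
}

static void free_range(unsigned long start, unsigned long end)
{
    while (start < end) {
        /* largest order permitted by the alignment of start */
        unsigned long order = start ? lowest_set_order(start) : MAX_ORDER - 1;

        if (order > MAX_ORDER - 1)
            order = MAX_ORDER - 1;

        /* shrink until the block also fits below end */
        while (start + (1UL << order) > end)
            order--;

        printf("free %lu page(s) at pfn %#lx (order %lu)\n",
               1UL << order, start, order);
        start += 1UL << order;
    }
}

int main(void)
{
    free_range(0x3, 0x20);    /* mixed alignment forces varying orders */
    return 0;
}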
readahead.c
    573  pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;   (local)
    574  unsigned long len = end - start + 1;
truncate.c
    220  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
    247  pgoff_t end;   /* exclusive */   (local)
    264  * 'start' and 'end' always covers the range of pages to be fully
    266  * start of the range and 'partial_end' at the end of the range.
    267  * Note that 'end' is exclusive while 'lend' is inclusive.
    272  * lend == -1 indicates end-of-file so we have to set 'end'
    276  end = -1;
    278  end = (lend + 1) >> PAGE_CACHE_SHIFT;
    282  while (index < end
    479  invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end)   (argument)
    582  invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end)   (argument)
    [all...]
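The hits at lines 267-278 flag a classic off-by-one trap: lend is an inclusive byte offset (with -1 meaning end-of-file) while the page index end is exclusive, so end = (lend + 1) >> shift counts only pages that lie entirely inside the range. A small sketch of the conversion; the 12-bit shift is an assumption standing in for PAGE_CACHE_SHIFT:

#include <stdio.h>

#define PAGE_SHIFT_DEMO 12    /* 4 KiB pages, assumed for the demo */

/* exclusive end page index for the inclusive byte offset lend */
static unsigned long end_index(long long lend)
{
    if (lend == -1)           /* end-of-file: cover everything */
        return (unsigned long)-1;
    return (unsigned long)((lend + 1) >> PAGE_SHIFT_DEMO);
}

int main(void)
{
    printf("%lu\n", end_index(8191));   /* bytes 0..8191: 2 full pages */
    printf("%lu\n", end_index(8192));   /* one extra byte: still 2, the
                                           partial page is not counted */
    return 0;
}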
mlock.c
    212  * @end: end address
    227  __mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking)   (argument)
    231  unsigned long nr_pages = (end - start) / PAGE_SIZE;
    235  VM_BUG_ON(end & ~PAGE_MASK);
    237  VM_BUG_ON_VMA(end > vma->vm_end, vma);
    415  __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end)   (argument)
    429  end = pgd_addr_end(start, end);
    430  end
    478  munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)   (argument)
    554  mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags)   (argument)
    615  unsigned long nstart, end, tmp;   (local)
    674  unsigned long end, nstart, nend;   (local)
    [all...]
page_cgroup.c
    196  unsigned long start, end, pfn;   (local)
    200  end = SECTION_ALIGN_UP(start_pfn + nr_pages);
    212  for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
    221  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
    230  unsigned long start, end, pfn;   (local)
    233  end = SECTION_ALIGN_UP(start_pfn + nr_pages);
    235  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
bootmem.c
     95  init_bootmem_core(bootmem_data_t *bdata, unsigned long mapstart, unsigned long start, unsigned long end)   (argument)
    100  mminit_validate_memmodel_limits(&start, &end);
    103  bdata->node_low_pfn = end;
    110  mapsize = bootmap_bytes(end - start);
    113  bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
    114  bdata - bootmem_node_data, start, mapstart, end, mapsize);
    159  unsigned long cursor, end;   (local)
    164  end = PFN_DOWN(physaddr + size);
    166  for (; cursor < end; cursor++) {
    175  unsigned long *map, start, end, page   (local)
    328  mark_bootmem_node(bootmem_data_t *bdata, unsigned long start, unsigned long end, int reserve, int flags)   (argument)
    350  mark_bootmem(unsigned long start, unsigned long end, int reserve, int flags)   (argument)
    395  unsigned long start, end;   (local)
    416  unsigned long start, end;   (local)
    440  unsigned long start, end;   (local)
    461  unsigned long start, end;   (local)
    [all...]
sparse.c
    170  void __init memory_present(int nid, unsigned long start, unsigned long end)   (argument)
    175  mminit_validate_memmodel_limits(&start, &end);
    176  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
    606  unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);   (local)
    608  vmemmap_free(start, end);
    614  unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);   (local)
    616  vmemmap_free(start, end);
filemap.c
    261  * @end: offset in bytes where the range ends (inclusive)
    265  * within the byte offsets <start, end> inclusive.
    272  __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode)   (argument)
    280  .range_end = end,
    302  filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end)   (argument)
    305  return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
    335  pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;   (local)
    344  while ((index <= end) &&
    347  min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
    354  if (page->index > end)
   2372  pgoff_t end;   (local)
    [all...]
memblock.c
    106  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
    117  __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,   (argument)
    124  this_start = clamp(this_start, start, end);
    125  this_end = clamp(this_end, start, end);
    138  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
    149  __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,   (argument)
    156  this_start = clamp(this_start, start, end);
    157  this_end = clamp(this_end, start, end);
    191  memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid)   (argument)
    251  memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align)   (argument)
    504  phys_addr_t end = base + memblock_cap_size(base, &size);   (local)
    609  phys_addr_t end = base + memblock_cap_size(base, &size);   (local)
   1040  memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid)   (argument)
   1061  memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end)   (argument)
   1298  u64 cursor, end;   (local)
   1434  phys_addr_t end = base + memblock_cap_size(base, &size);   (local)
   1461  phys_addr_t start, end, orig_start, orig_end;   (local)
    [all...]
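Lines 124-125 and 156-157 show the clamp() idiom shared by both search directions: each candidate region is first clipped to the caller's [start, end) window, then tested for an aligned block of the requested size. A bottom-up userspace sketch over a made-up free list; the helpers and region data are assumptions of this demo, not memblock's internals:

#include <stdio.h>

struct region { unsigned long start, end; };

static unsigned long clamp(unsigned long v, unsigned long lo, unsigned long hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned long round_up(unsigned long x, unsigned long a)
{
    return (x + a - 1) & ~(a - 1);    /* a must be a power of two */
}

/* lowest aligned block of size bytes inside [start, end), scanned bottom-up */
static unsigned long find_bottom_up(const struct region *free_list, int n,
                                    unsigned long start, unsigned long end,
                                    unsigned long size, unsigned long align)
{
    for (int i = 0; i < n; i++) {
        /* clip the region to the caller's window, as memblock does */
        unsigned long this_start = clamp(free_list[i].start, start, end);
        unsigned long this_end   = clamp(free_list[i].end, start, end);
        unsigned long cand       = round_up(this_start, align);

        if (cand < this_end && this_end - cand >= size)
            return cand;
    }
    return 0;    /* no fit */
}

int main(void)
{
    struct region free_list[] = { { 0x1000, 0x3000 }, { 0x8000, 0x20000 } };

    /* the first region clips to 0x1000 usable bytes, too small; expect 0x8000 */
    printf("%#lx\n", find_bottom_up(free_list, 2, 0x2000, 0x10000,
                                    0x4000, 0x1000));
    return 0;
}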
memory_hotplug.c
     74  /* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
    138  res->end = start + size - 1;
    555  /* pfn is the end pfn of a memory section. */
   1304  /* If the entire pageblock is free, move to the end of free page */
   1334  * Confirm all pages in a range [start, end) is belongs to the same zone.
   1360  * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
   1365  static unsigned long scan_movable_pages(unsigned long start, unsigned long end)   (argument)
   1369  for (pfn = start; pfn < end; pfn++) {
   1819  * @end_pfn: end pfn of the memory range
rmap.c
   1295  * maybe we could scan further - to the end of the pte page, perhaps.
   1317  unsigned long end;   (local)
   1322  end = address + CLUSTER_SIZE;
   1325  if (end > vma->vm_end)
   1326  end = vma->vm_end;
   1333  mmun_end = end;
   1351  for (; address < end; pte++, address += PAGE_SIZE) {
   1376  * mmu_notifier_invalidate_range_ {start|end} scope.
huge_memory.c
   1426  mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec)   (argument)
   1439  memset(vec, 1, (end - addr) >> PAGE_SHIFT);
   2941  __vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next)   (argument)
   2957  * If the new end address isn't hpage aligned and it could
   2961  if (end & ~HPAGE_PMD_MASK &&
   2962  (end & HPAGE_PMD_MASK) >= vma->vm_start &&
   2963  (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
   2964  split_huge_page_address(vma->vm_mm, end);
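The condition at lines 2961-2963 decides whether the huge page straddling the new end must be split: end is not huge-page aligned, yet the huge page containing it lies entirely inside [vm_start, vm_end). A sketch of the mask arithmetic; a 2 MiB huge page is assumed in place of HPAGE_PMD_SIZE:

#include <stdio.h>

#define HPAGE_SIZE_DEMO (1UL << 21)               /* assumed 2 MiB huge page */
#define HPAGE_MASK_DEMO (~(HPAGE_SIZE_DEMO - 1))

static int needs_split(unsigned long end,
                       unsigned long vm_start, unsigned long vm_end)
{
    return (end & ~HPAGE_MASK_DEMO) &&            /* end not hpage aligned   */
           (end & HPAGE_MASK_DEMO) >= vm_start && /* hpage starts in the vma */
           (end & HPAGE_MASK_DEMO) + HPAGE_SIZE_DEMO <= vm_end;
}

int main(void)
{
    /* end cuts into the middle of a huge page that fits in the vma: 1 */
    printf("%d\n", needs_split(0x40100000UL, 0x40000000UL, 0x40400000UL));
    /* end is huge-page aligned, nothing to split: 0 */
    printf("%d\n", needs_split(0x40200000UL, 0x40000000UL, 0x40400000UL));
    return 0;
}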
kmemleak.c
     49  * scan_mutex is held. At the end of a scan, the gray_list is always empty.
    652  unsigned long start, end;   (local)
    672  end = object->pointer + object->size;
    676  if (ptr + size < end)
    677  create_object(ptr + size, end - ptr - size, object->min_count,
   1150  unsigned long *end = _end - (BYTES_PER_POINTER - 1);   (local)
   1152  for (ptr = start; ptr < end; ptr++) {
   1231  void *end = (void *)(object->pointer + object->size);   (local)
   1233  while (start < end && (object->flags & OBJECT_ALLOCATED) &&
   1235  scan_block(start, min(start + MAX_SCAN_SIZE, end),
    [all...]
ksm.c
    681  unmerge_ksm_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)   (argument)
    687  for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
   1738  ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags)   (argument)
   1773  err = unmerge_ksm_pages(vma, start, end);
   1807  * missed: then we might as well insert at the end of the list.