/mm/ (cross-reference hits for the symbol `start`; a trailing "(argument)", "(local)", or "(member ...)" tag marks a line where `start` is declared as such)
madvise.c
    47  unsigned long start, unsigned long end, int behavior)
    86  error = ksm_madvise(vma, start, end, behavior, &new_flags);
   103  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
   104  *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
   114  if (start != vma->vm_start) {
   115  error = split_vma(mm, vma, start, 1);
   139  static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,  (argument)
   149  for (index = start; index != end; index += PAGE_SIZE) {
   155  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
   156  pte = *(orig_pte + ((index - start) / PAGE_SIZE));
    45  madvise_behavior(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior)  (argument)
   174  force_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end)  (argument)
   188  force_shm_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping)  (argument)
   219  madvise_willneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end)  (argument)
   274  madvise_dontneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end)  (argument)
   297  madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end)  (argument)
   342  madvise_hwpoison(int bhv, unsigned long start, unsigned long end)  (argument)
   377  madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior)  (argument)
   [all...]
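The madvise.c hits at lines 103-115 show the usual pattern for applying an attribute to an arbitrary [start, end) span of address space: try vma_merge() with the neighbours first, then split_vma() at whichever end of the range falls strictly inside the containing VMA. A minimal userspace sketch of just the split decision on a generic interval; the `region` type and split_region() helper are hypothetical, not kernel API:

    #include <stdio.h>

    struct region { unsigned long start, end; };

    /* Hypothetical stand-in for the kernel's split_vma(): cut 'r' at 'at'
     * and return the upper half, so flags can change on one side only. */
    static struct region split_region(struct region *r, unsigned long at)
    {
        struct region upper = { at, r->end };
        r->end = at;
        return upper;
    }

    int main(void)
    {
        struct region r = { 0x1000, 0x9000 };
        unsigned long start = 0x3000, end = 0x5000;

        /* Mirrors the shape of madvise_behavior(): split below and above
         * the target range so [start, end) becomes its own region. */
        if (start != r.start)
            r = split_region(&r, start);   /* keep tracking the upper part */
        if (end != r.end)
            (void)split_region(&r, end);   /* detach the tail */

        printf("target region: [%#lx, %#lx)\n", r.start, r.end);
        return 0;
    }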
msync.c
    20  * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
    26  * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
    31  SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
    41  if (start & ~PAGE_MASK)
    47  end = start + len;
    48  if (end < start)
    51  if (end == start)
    54  * If the interval [start,end) covers some unmapped address ranges,
    58  vma = find_vma(mm, start);
    63  /* Still start < end */
   [all...]
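The msync.c hits at lines 41-51 are the canonical syscall range-validation sequence: reject a misaligned start, compute end = start + len, treat wraparound as an error, and short-circuit the empty range. A standalone sketch of that sequence (the error values follow what these hits suggest the kernel's msync() uses, but this is an illustration, not the syscall itself):

    #include <errno.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Validate a (start, len) pair the way sys_msync() does: start must
     * be page-aligned, start+len must not wrap, and an empty range is
     * fine (the caller simply does nothing). */
    static int validate_range(unsigned long start, unsigned long len,
                              unsigned long *end_out)
    {
        unsigned long end;

        if (start & ~PAGE_MASK)       /* misaligned start */
            return -EINVAL;
        end = start + len;
        if (end < start)              /* overflow: the range wraps */
            return -ENOMEM;
        *end_out = end;
        return 0;                     /* end == start means nothing to do */
    }

    int main(void)
    {
        unsigned long end;
        printf("%d\n", validate_range(0x1000, 0x2000, &end));  /* 0 */
        printf("%d\n", validate_range(0x1001, 0x2000, &end));  /* -EINVAL */
        printf("%d\n", validate_range(0x1000, ~0UL, &end));    /* -ENOMEM */
        return 0;
    }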
debug-pagealloc.c
    51  unsigned char *start;  (local)
    54  start = memchr_inv(mem, PAGE_POISON, bytes);
    55  if (!start)
    58  for (end = mem + bytes - 1; end > start; end--) {
    65  else if (start == end && single_bit_flip(*start, PAGE_POISON))
    70  print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
    71  end - start + 1, 1);
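The function these debug-pagealloc.c hits come from finds the first non-poison byte with memchr_inv(), walks backward from the buffer's tail to find the last bad byte, and classifies a lone single-bit mismatch separately, since that usually means a hardware bit flip rather than a stray write. A userspace sketch of the same scan; memchr_inv() and single_bit_flip() are kernel helpers reimplemented here, and the poison value 0xaa is an assumption for the example:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_POISON 0xaa   /* poison byte; value assumed for the demo */

    /* Userspace stand-in for the kernel's memchr_inv(): first byte != c. */
    static unsigned char *memchr_inv_sketch(unsigned char *mem, int c, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (mem[i] != (unsigned char)c)
                return mem + i;
        return NULL;
    }

    /* True if a and b differ in exactly one bit. */
    static int single_bit_flip(unsigned char a, unsigned char b)
    {
        unsigned char d = a ^ b;
        return d && !(d & (d - 1));
    }

    int main(void)
    {
        unsigned char buf[64];
        memset(buf, PAGE_POISON, sizeof(buf));
        buf[10] ^= 0x04;   /* inject a single-bit flip */

        unsigned char *start = memchr_inv_sketch(buf, PAGE_POISON, sizeof(buf));
        if (!start)
            return 0;      /* buffer is clean */

        unsigned char *end;    /* last corrupted byte, found from the tail */
        for (end = buf + sizeof(buf) - 1; end > start; end--)
            if (*end != PAGE_POISON)
                break;

        if (start == end && single_bit_flip(*start, PAGE_POISON))
            printf("single bit flip at offset %td\n", start - buf);
        else
            printf("corruption from offset %td to %td\n",
                   start - buf, end - buf);
        return 0;
    }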
mlock.c
   211  * @start: start address
   228  unsigned long start, unsigned long end, int *nonblocking)
   231  unsigned long nr_pages = (end - start) / PAGE_SIZE;
   234  VM_BUG_ON(start & ~PAGE_MASK);
   236  VM_BUG_ON_VMA(start < vma->vm_start, vma);
   260  return __get_user_pages(current, mm, start, nr_pages, gup_flags,
   405  * The function expects that the struct page corresponding to @start address is
   413  * @start + PAGE_SIZE when no page could be added by the pte walk.
   416  struct vm_area_struct *vma, int zoneid, unsigned long start,
   227  __mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking)  (argument)
   415  __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end)  (argument)
   478  munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)  (argument)
   554  mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags)  (argument)
   613  do_mlock(unsigned long start, size_t len, int on)  (argument)
   671  __mm_populate(unsigned long start, unsigned long len, int ignore_errors)  (argument)
   [all...]
fremap.c
   124  * @start: start of the remapped virtual memory range
   142  SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
   161  start = start & PAGE_MASK;
   165  if (start + size <= start)
   181  vma = find_vma(mm, start);
   194  if (start < vma->vm_start || start ...
   [all...]
bootmem.c
    96  unsigned long mapstart, unsigned long start, unsigned long end)
   100  mminit_validate_memmodel_limits(&start, &end);
   102  bdata->node_min_pfn = start;
   110  mapsize = bootmap_bytes(end - start);
   113  bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
   114  bdata - bootmem_node_data, start, mapstart, end, mapsize);
   136  * @start: pfn where the bitmap is to be placed
   141  unsigned long __init init_bootmem(unsigned long start, unsigned long pages)  (argument)
   144  min_low_pfn = start;
   145  return init_bootmem_core(NODE_DATA(0)->bdata, start, ...
    95  init_bootmem_core(bootmem_data_t *bdata, unsigned long mapstart, unsigned long start, unsigned long end)  (argument)
   175  unsigned long *map, start, end, pages, count = 0;  (local)
   328  mark_bootmem_node(bootmem_data_t *bdata, unsigned long start, unsigned long end, int reserve, int flags)  (argument)
   350  mark_bootmem(unsigned long start, unsigned long end, int reserve, int flags)  (argument)
   395  unsigned long start, end;  (local)
   416  unsigned long start, end;  (local)
   440  unsigned long start, end;  (local)
   461  unsigned long start, end;  (local)
   497  unsigned long min, max, start, sidx, midx, step;  (local)
   [all...]
nobootmem.c
    85  static void __init __free_pages_memory(unsigned long start, unsigned long end)  (argument)
    89  while (start < end) {
    90  order = min(MAX_ORDER - 1UL, __ffs(start));
    92  while (start + (1UL << order) > end)
    95  __free_pages_bootmem(pfn_to_page(start), order);
    97  start += (1UL << order);
   101  static unsigned long __init __free_memory_core(phys_addr_t start,  (argument)
   104  unsigned long start_pfn = PFN_UP(start);
   119  phys_addr_t start, end;  (local)
   124  for_each_free_mem_range(i, NUMA_NO_NODE, &start, ...
   [all...]
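__free_pages_memory() hands a PFN range to the buddy allocator in the largest naturally aligned power-of-two chunks that fit, which is what lines 89-97 compute: the alignment of `start` caps the order, and the inner loop shrinks the chunk until it stays inside the range. A userspace sketch of the chunking, with __builtin_ctzl() standing in for the kernel's __ffs() and MAX_ORDER assumed to be 11:

    #include <stdio.h>

    #define MAX_ORDER 11UL   /* typical kernel value; assumed here */

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    /* Walk [start, end) in the largest aligned power-of-two chunks. */
    static void free_range(unsigned long start, unsigned long end)
    {
        while (start < end) {
            /* Largest order 'start' is aligned to, capped at MAX_ORDER-1.
             * ctz is undefined for 0, so pfn 0 gets the cap directly. */
            unsigned long order = start
                ? min_ul(MAX_ORDER - 1UL,
                         (unsigned long)__builtin_ctzl(start))
                : MAX_ORDER - 1UL;

            /* Shrink the chunk until it fits inside the range. */
            while (start + (1UL << order) > end)
                order--;

            printf("free %lu pages at pfn %#lx (order %lu)\n",
                   1UL << order, start, order);
            start += 1UL << order;
        }
    }

    int main(void)
    {
        free_range(0x103, 0x215);
        return 0;
    }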
readahead.c
    74  * read_cache_pages - populate an address space with some pages & start reads against them
   195  * Now start the IO. We ignore I/O errors - if the page is not
   292  * ^start          ^page marked with PG_readahead
   303  * page at (start+size-async_size) with PG_readahead, and use it as readahead
   367  ra->start = offset;
   387  * start of file
   396  if ((offset == (ra->start + ra->size - ra->async_size) ||
   397  offset == (ra->start + ra->size))) {
   398  ra->start += ra->size;
   411  pgoff_t start;  (local)
   572  pgoff_t start = offset >> PAGE_CACHE_SHIFT;  (local)
   [all...]
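Lines 396-398 are the sequential-read fast path: when the requested offset lands exactly on the async-readahead mark (start + size - async_size) or on the window's end, the window slides forward rather than being rebuilt from scratch. A toy model of that bookkeeping; the struct below is a simplified stand-in for the kernel's struct file_ra_state, and the window-doubling growth policy is an assumption for the example:

    #include <stdio.h>

    /* Simplified stand-in for struct file_ra_state. */
    struct ra_state {
        unsigned long start;       /* first page of the current window */
        unsigned long size;        /* window size in pages */
        unsigned long async_size;  /* pages left when the next batch starts */
        unsigned long max;         /* cap on the window size */
    };

    static void on_demand_readahead(struct ra_state *ra, unsigned long offset)
    {
        if (offset == ra->start + ra->size - ra->async_size ||
            offset == ra->start + ra->size) {
            /* Sequential: slide the window forward and (growth policy
             * assumed here) double it up to the cap. */
            ra->start += ra->size;
            ra->size = ra->size * 2 > ra->max ? ra->max : ra->size * 2;
            ra->async_size = ra->size;
        } else {
            /* Random access: restart a small window at this offset. */
            ra->start = offset;
            ra->size = 4;
            ra->async_size = ra->size;
        }
        printf("window [%lu, %lu), async_size=%lu\n",
               ra->start, ra->start + ra->size, ra->async_size);
    }

    int main(void)
    {
        struct ra_state ra = { .start = 0, .size = 4,
                               .async_size = 4, .max = 32 };
        on_demand_readahead(&ra, 4);    /* hits start+size: sequential */
        on_demand_readahead(&ra, 12);   /* hits the async mark: sequential */
        on_demand_readahead(&ra, 100);  /* random */
        return 0;
    }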
mprotect.c
   217  unsigned long start = addr;  (local)
   234  flush_tlb_range(vma, start, end);
   240  unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,  (argument)
   247  pages = hugetlb_change_protection(vma, start, end, newprot);
   249  pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
   256  unsigned long start, unsigned long end, unsigned long newflags)
   260  long nrpages = (end - start) >> PAGE_SHIFT;
   290  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
   291  *pprev = vma_merge(mm, *pprev, start, end, newflags,
   301  if (start != ...
   255  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags)  (argument)
   [all...]
gup.c
   373  * @start: starting user address
   374  * @nr_pages: number of pages from start to pin
   426  unsigned long start, unsigned long nr_pages,
   453  if (!vma || start >= vma->vm_end) {
   454  vma = find_extend_vma(mm, start);
   455  if (!vma && in_gate_area(mm, start)) {
   457  ret = get_gate_page(mm, start & PAGE_MASK,
   470  &start, &nr_pages, i,
   483  page = follow_page_mask(vma, start, foll_flags, &page_mask);
   486  ret = faultin_page(tsk, vma, start, ...
   425  __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)  (argument)
   637  get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)  (argument)
   940  __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)  (argument)
  1000  get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)  (argument)
   [all...]
truncate.c
    69  * @offset: start of the range to invalidate
   220  * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
   246  pgoff_t start;  /* inclusive */  (local)
   264  * 'start' and 'end' always covers the range of pages to be fully
   266  * start of the range and 'partial_end' at the end of the range.
   269  start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
   281  index = start;
   315  struct page *page = find_lock_page(mapping, start - 1);
   318  if (start > end) {
   350  if (start > ...
   479  invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end)  (argument)
   582  invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end)  (argument)
   [all...]
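Line 269 is the rounding step that separates whole pages from partial ones: the first page that can be dropped outright is lstart rounded up to a page boundary, and if lstart sits mid-page the head page is only partially covered, so it must be zeroed rather than freed. A small demonstration of the index arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long lstart = 0x1800;   /* byte offset, mid-page */

        /* First page index that is *fully* inside the range: round up. */
        unsigned long start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* Page containing lstart itself: round down. If the two differ,
         * that head page is only partially covered. */
        unsigned long head = lstart >> PAGE_SHIFT;

        printf("first whole page index: %lu\n", start);   /* 2 */
        printf("partial head page: %s\n",
               head != start ? "yes" : "no");             /* yes */
        return 0;
    }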
memory_hotplug.c
   130  static struct resource *register_memory_resource(u64 start, u64 size)  (argument)
   137  res->start = start;
   138  res->end = start + size - 1;
   748  * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
   761  resource_size_t start, size;  (local)
   770  start = phys_start_pfn << PAGE_SHIFT;
   772  ret = release_mem_region_adjustable(&iomem_resource, start, size);
   774  resource_size_t endres = start + size - 1;
   777  &start, ...
  1081  hotadd_new_pgdat(int nid, u64 start)  (argument)
  1171  check_hotplug_memory_range(u64 start, u64 size)  (argument)
  1193  should_add_memory_movable(int nid, u64 start, u64 size)  (argument)
  1208  zone_for_memory(int nid, u64 start, u64 size, int zone_default)  (argument)
  1217  add_memory(int nid, u64 start, u64 size)  (argument)
  1365  scan_movable_pages(unsigned long start, unsigned long end)  (argument)
  1467  offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, void *data)  (argument)
  1999  remove_memory(int nid, u64 start, u64 size)  (argument)
   [all...]
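Lines 137-138 show the struct resource convention worth calling out: `end` is inclusive, so a region of `size` bytes ends at start + size - 1, and the same arithmetic reappears at line 774. A tiny illustration of why size and containment checks then need the +1 and <= forms; the struct and helpers below are hypothetical stand-ins, not kernel API:

    #include <stdio.h>
    #include <stdint.h>

    /* Inclusive-end convention, as in struct resource. */
    struct resource_sketch { uint64_t start, end; };

    static uint64_t res_size(const struct resource_sketch *r)
    {
        return r->end - r->start + 1;   /* +1 because end is inclusive */
    }

    static int res_contains(const struct resource_sketch *r, uint64_t addr)
    {
        return addr >= r->start && addr <= r->end;   /* <=, not < */
    }

    int main(void)
    {
        uint64_t start = 0x100000000ULL;   /* 1 GiB region at 4 GiB */
        uint64_t size  = 0x40000000ULL;
        struct resource_sketch res = { start, start + size - 1 };

        printf("size = %#llx\n", (unsigned long long)res_size(&res));
        printf("last byte in range: %d\n",
               res_contains(&res, start + size - 1));   /* 1 */
        printf("one past the end:   %d\n",
               res_contains(&res, start + size));       /* 0 */
        return 0;
    }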
sparse-vmemmap.c
    91  unsigned long start, unsigned long end)
    98  "page_structs\n", start, end - 1);
   151  int __meminit vmemmap_populate_basepages(unsigned long start,  (argument)
   154  unsigned long addr = start;
   181  unsigned long start;  (local)
   186  start = (unsigned long)map;
   189  if (vmemmap_populate(start, end, nid))
    90  vmemmap_verify(pte_t *pte, int node, unsigned long start, unsigned long end)  (argument)
vmacache.c
   111  unsigned long start,
   124  if (vma && vma->vm_start == start && vma->vm_end == end) {
   110  vmacache_find_exact(struct mm_struct *mm, unsigned long start, unsigned long end)  (argument)
mmu_notifier.c
   110  unsigned long start,
   119  young |= mn->ops->clear_flush_young(mn, mm, start, end);
   174  unsigned long start, unsigned long end)
   182  mn->ops->invalidate_range_start(mn, mm, start, end);
   189  unsigned long start, unsigned long end)
   197  mn->ops->invalidate_range_end(mn, mm, start, end);
   109  __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long start, unsigned long end)  (argument)
   173  __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end)  (argument)
   188  __mmu_notifier_invalidate_range_end(struct mm_struct *mm, unsigned long start, unsigned long end)  (argument)
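These hits show the notifier dispatch pattern: every registered listener is called with invalidate_range_start() before a range of page tables changes and invalidate_range_end() after, while clear_flush_young() OR-folds the per-listener results. A minimal sketch of that dispatch over a fixed array; the real kernel walks an RCU-protected hlist, and the `mmu_ops` struct here is a hypothetical simplification:

    #include <stdio.h>

    struct mmu_ops {
        void (*invalidate_range_start)(unsigned long start, unsigned long end);
        void (*invalidate_range_end)(unsigned long start, unsigned long end);
        int  (*clear_flush_young)(unsigned long start, unsigned long end);
    };

    static void log_start(unsigned long s, unsigned long e)
    {
        printf("listener: invalidate_range_start [%#lx, %#lx)\n", s, e);
    }

    static void log_end(unsigned long s, unsigned long e)
    {
        printf("listener: invalidate_range_end   [%#lx, %#lx)\n", s, e);
    }

    static int saw_young(unsigned long s, unsigned long e)
    {
        (void)s; (void)e;
        return 1;   /* pretend this listener saw a recent access */
    }

    static struct mmu_ops listeners[] = { { log_start, log_end, saw_young } };
    #define NR_LISTENERS (sizeof(listeners) / sizeof(listeners[0]))

    /* OR-fold each listener's answer, mirroring the shape of
     * __mmu_notifier_clear_flush_young() at line 119. */
    static int clear_flush_young_all(unsigned long start, unsigned long end)
    {
        int young = 0;
        for (size_t i = 0; i < NR_LISTENERS; i++)
            young |= listeners[i].clear_flush_young(start, end);
        return young;
    }

    int main(void)
    {
        unsigned long start = 0x1000, end = 0x3000;

        /* Bracket a page-table change with start/end notifications. */
        for (size_t i = 0; i < NR_LISTENERS; i++)
            listeners[i].invalidate_range_start(start, end);
        /* ... mappings for [start, end) would be changed here ... */
        for (size_t i = 0; i < NR_LISTENERS; i++)
            listeners[i].invalidate_range_end(start, end);

        printf("young: %d\n", clear_flush_young_all(start, end));
        return 0;
    }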
vmalloc.c
   178  static int vmap_page_range_noflush(unsigned long start, unsigned long end,  (argument)
   183  unsigned long addr = start;
   199  static int vmap_page_range(unsigned long start, unsigned long end,  (argument)
   204  ret = vmap_page_range_noflush(start, end, prot, pages);
   205  flush_cache_vmap(start, end);
   526  static void vmap_debug_free_range(unsigned long start, unsigned long end)  (argument)
   542  vunmap_page_range(start, end);
   543  flush_tlb_kernel_range(start, end);
   590  * If force_flush is 1, then flush kernel TLBs between *start and *end even
   593  * Returns with *start ...
   596  __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, int sync, int force_flush)  (argument)
   655  unsigned long start = ULONG_MAX, end = 0;  (local)
   665  unsigned long start = ULONG_MAX, end = 0;  (local)
  1014  unsigned long start = ULONG_MAX, end = 0;  (local)
  1309  __get_vm_area_node(unsigned long size, unsigned long align, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, const void *caller)  (argument)
  1344  __get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end)  (argument)
  1352  __get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, const void *caller)  (argument)
  1632  __vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, int node, const void *caller)  (argument)
  2370  unsigned long base, start, end, last_end;  (local)
   [all...]
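The three hits that initialize `start = ULONG_MAX, end = 0` (lines 655, 665, 1014) are the standard idiom for accumulating the bounding range of a set of intervals: the degenerate initial values guarantee the first real interval replaces both bounds, so a single flush_tlb_kernel_range() call can cover every lazily freed area. A standalone sketch:

    #include <stdio.h>
    #include <limits.h>

    struct area { unsigned long va_start, va_end; };

    int main(void)
    {
        struct area lazy[] = {
            { 0x9000, 0xb000 }, { 0x2000, 0x3000 }, { 0x5000, 0x8000 },
        };
        /* Degenerate initial range: any real area both lowers 'start'
         * and raises 'end', as in __purge_vmap_area_lazy(). */
        unsigned long start = ULONG_MAX, end = 0;

        for (int i = 0; i < 3; i++) {
            if (lazy[i].va_start < start)
                start = lazy[i].va_start;
            if (lazy[i].va_end > end)
                end = lazy[i].va_end;
        }

        if (end > start)   /* at least one area was accumulated */
            printf("flush TLB range [%#lx, %#lx)\n", start, end);
        return 0;
    }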
mincore.c
   268  SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
   275  /* Check the start address: needs to be page-aligned.. */
   276  if (start & ~PAGE_CACHE_MASK)
   280  if (!access_ok(VERIFY_READ, (void __user *) start, len))
   301  retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
   312  start += retval << PAGE_SHIFT;
mempolicy.c
   658  queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,  (argument)
   664  vma = find_vma(mm, start);
   673  if (vma->vm_start > start)
   674  start = vma->vm_start;
   686  change_prot_numa(vma, start, endvma);
   694  err = queue_pages_pgd_range(vma, start, endvma, nodes,
   742  static int mbind_range(struct mm_struct *mm, unsigned long start,  (argument)
   753  vma = find_vma(mm, start);
   754  if (!vma || vma->vm_start > start)
   758  if (start > vm...
  1148  new_page(struct page *page, unsigned long start, int **x)  (argument)
  1183  new_page(struct page *page, unsigned long start, int **x)  (argument)
  1189  do_mbind(unsigned long start, unsigned long len, unsigned short mode, unsigned short mode_flags, nodemask_t *nmask, unsigned long flags)  (argument)
  2179  sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)  (argument)
  2351  sp_node_init(struct sp_node *node, unsigned long start, unsigned long end, struct mempolicy *pol)  (argument)
  2359  sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)  (argument)
  2381  shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new)  (argument)
   [all...]
memblock.c
   105  * @start: start of candidate range
   117  __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,  (argument)
   124  this_start = clamp(this_start, start, end);
   125  this_end = clamp(this_end, start, end);
   137  * @start: start of candidate range
   149  __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,  (argument)
   156  this_start = clamp(this_start, start, end);
   157  this_end = clamp(this_end, start, end);
   191  memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid)  (argument)
   251  memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align)  (argument)
  1040  memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid)  (argument)
  1061  memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end)  (argument)
  1461  phys_addr_t start, end, orig_start, orig_end;  (local)
   [all...]
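Both search directions rely on the same intersection trick seen at lines 124-125 and 156-157: clamp each free region's bounds to the caller's [start, end) window, and the region is usable only if the clamped span still has room for an aligned allocation. A sketch of a bottom-up search over a made-up free list (the `free_region` table and the align_up() helper are inventions for the example):

    #include <stdio.h>

    typedef unsigned long long phys_addr_t;

    static phys_addr_t clamp(phys_addr_t v, phys_addr_t lo, phys_addr_t hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    static phys_addr_t align_up(phys_addr_t v, phys_addr_t a)
    {
        return (v + a - 1) & ~(a - 1);
    }

    /* Bottom-up candidate search: first window-intersecting free region
     * with enough room, same shape as __memblock_find_range_bottom_up(). */
    static phys_addr_t find_range(phys_addr_t start, phys_addr_t end,
                                  phys_addr_t size, phys_addr_t align)
    {
        /* Made-up free list for the example. */
        static const phys_addr_t free_region[][2] = {
            { 0x0000, 0x4000 }, { 0x8000, 0x20000 },
        };

        for (int i = 0; i < 2; i++) {
            phys_addr_t this_start = clamp(free_region[i][0], start, end);
            phys_addr_t this_end   = clamp(free_region[i][1], start, end);
            phys_addr_t cand = align_up(this_start, align);

            if (cand < this_end && this_end - cand >= size)
                return cand;
        }
        return 0;   /* nothing found */
    }

    int main(void)
    {
        printf("candidate: %#llx\n",
               find_range(0x1000, 0x10000, 0x2000, 0x1000));
        return 0;
    }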
nommu.c
   150  unsigned long start, unsigned long nr_pages,
   167  vma = find_vma(mm, start);
   177  pages[i] = virt_to_page(start);
   183  start = (start + PAGE_SIZE) & PAGE_MASK;
   200  unsigned long start, unsigned long nr_pages,
   211  return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
   691  long start = vma->vm_start & PAGE_MASK;  (local)
   692  while (start < vma->vm_end) {
   693  protect_page(mm, start, flags);
   149  __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int foll_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)  (argument)
   199  get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)  (argument)
  1330  unsigned long pglen, rpglen, pgend, rpgend, start;  (local)
  1653  do_munmap(struct mm_struct *mm, unsigned long start, size_t len)  (argument)
  1852  vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)  (argument)
   [all...]
page_cgroup.c
   196  unsigned long start, end, pfn;  (local)
   199  start = SECTION_ALIGN_DOWN(start_pfn);
   212  for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
   221  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
   230  unsigned long start, end, pfn;  (local)
   232  start = SECTION_ALIGN_DOWN(start_pfn);
   235  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
kmemleak.c
   124  unsigned long start;  (member in struct kmemleak_scan_area)
   652  unsigned long start, end;  (local)
   671  start = object->pointer;
   673  if (ptr > start)
   674  create_object(start, ptr - start, object->min_count,
   767  area->start = ptr;
   953  * represents the start of the range to be freed
  1067  * represents the start of the scan area
  1149  unsigned long *start ...  (local)
  1230  void *start = (void *)object->pointer;  (local)
   [all...]
sparse.c
   170  void __init memory_present(int nid, unsigned long start, unsigned long end)  (argument)
   174  start &= PAGE_SECTION_MASK;
   175  mminit_validate_memmodel_limits(&start, &end);
   176  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
   509  /* new start, update count etc*/
   605  unsigned long start = (unsigned long)memmap;  (local)
   608  vmemmap_free(start, end);
   613  unsigned long start = (unsigned long)memmap;  (local)
   616  vmemmap_free(start, end);
iov_iter.c
   448  size_t *start)
   463  len += *start = addr & (PAGE_SIZE - 1);
   471  return (res == n ? len : res * PAGE_SIZE) - *start;
   476  size_t *start)
   492  len += *start = addr & (PAGE_SIZE - 1);
   508  return (res == n ? len : res * PAGE_SIZE) - *start;
   778  size_t *start)
   787  *start = bvec->bv_offset + i->iov_offset;
   796  size_t *start)
   804  *start ...
   446  get_pages_iovec(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start)  (argument)
   474  get_pages_alloc_iovec(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start)  (argument)
   776  get_pages_bvec(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start)  (argument)
   794  get_pages_alloc_bvec(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start)  (argument)
   929  iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start)  (argument)
   940  iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start)  (argument)
   [all...]
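The get_pages_* hits all report the buffer's offset inside its first page through *start = addr & (PAGE_SIZE - 1), then convert a count of pinned pages back into usable bytes by subtracting that offset, which is the shape of the return expression at lines 471 and 508. A small demonstration of the arithmetic, assuming 4 KiB pages and that every requested page was pinned:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long addr = 0x7f0000001234UL;   /* user buffer, mid-page */
        size_t maxsize = 10000;

        size_t start = addr & (PAGE_SIZE - 1);   /* offset in first page */

        /* Pages needed to cover [addr, addr + maxsize): offset plus
         * size, rounded up to whole pages. */
        size_t n = (start + maxsize + PAGE_SIZE - 1) / PAGE_SIZE;

        /* If all n pages were pinned, the usable byte count is the page
         * span minus the initial offset, capped by the request. */
        size_t len = n * PAGE_SIZE - start;
        if (len > maxsize)
            len = maxsize;

        printf("offset in first page: %zu\n", start);   /* 564 */
        printf("pages: %zu, bytes: %zu\n", n, len);     /* 3, 10000 */
        return 0;
    }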
internal.h
    36  ra->start, ra->size, ra->async_size);
   151  * at the end of a zone and migrate_pfn begins at the start. Movable pages
   225  unsigned long start, unsigned long end, int *nonblocking);
   227  unsigned long start, unsigned long end);