/mm/: references to nr_pages
percpu-km.c
    50  const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  [local]
    59  pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
    65  for (i = 0; i < nr_pages; i++)
    72  pcpu_chunk_populated(chunk, 0, nr_pages);
    80  const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;  [local]
    83  __free_pages(chunk->data, order_base_2(nr_pages));
    94  size_t nr_pages, alloc_pages;  [local]
   102  nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
   103  alloc_pages = roundup_pow_of_two(nr_pages);
   105  if (alloc_pages > nr_pages)
   [all...]
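The percpu-km lines sketch a fixed sizing recipe: the first group's size in bytes becomes a page count via a PAGE_SHIFT shift, the count is rounded up to a power of two so it can be handed to alloc_pages() as an order, and line 105 checks whether that rounding over-allocated. A minimal userspace sketch of the same arithmetic, assuming 4 KiB pages; the two helpers are stand-ins for the kernel's roundup_pow_of_two() and order_base_2():

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    /* Userspace stand-in for the kernel's roundup_pow_of_two(). */
    static unsigned long roundup_p2(unsigned long n)
    {
        unsigned long p = 1;
        while (p < n)
            p <<= 1;
        return p;
    }

    /* Stand-in for order_base_2(): ceil(log2(n)), the order alloc_pages() wants. */
    static int order_of(unsigned long n)
    {
        int order = 0;
        while ((1UL << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long group_size = 40UL << PAGE_SHIFT;  /* hypothetical group size */
        unsigned long nr_pages = group_size >> PAGE_SHIFT;
        unsigned long alloc = roundup_p2(nr_pages);

        printf("nr_pages=%lu order=%d wasted=%lu\n",
               nr_pages, order_of(nr_pages), alloc - nr_pages);
        /* prints: nr_pages=40 order=6 wasted=24 */
        return 0;
    }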
page_isolation.c
    26  arg.nr_pages = pageblock_nr_pages;
    59  unsigned long nr_pages;  [local]
    64  nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
    66  __mod_zone_freepage_state(zone, -nr_pages, migratetype);
    78  unsigned long flags, nr_pages;  [local]
   118  nr_pages = move_freepages_block(zone, page, migratetype);
   119  __mod_zone_freepage_state(zone, nr_pages, migratetype);
   130  __first_valid_page(unsigned long pfn, unsigned long nr_pages)  [argument]
   133  for (i = 0; i < nr_pages; i++)
   136  if (unlikely(i == nr_pages))
   [all...]
percpu-vm.c
   133  static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)  [argument]
   135  unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
   191  __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages)  [argument]
   194  return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
gup.c
   374  * @nr_pages: number of pages from start to pin
   377  *            Should be at least nr_pages long. Or NULL, if caller
   384  * requested. If nr_pages is 0 or negative, returns 0. If no pages
   425  __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)  [argument]
   434  if (!nr_pages)
   470  &start, &nr_pages, i,
   516  if (page_increm > nr_pages)
   517  page_increm = nr_pages;
   520  nr_pages -= page_increm;
   521  } while (nr_pages);
   637  get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)  [argument]
   940  __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)  [argument]
  1000  get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)  [argument]
  [all...]
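Lines 516–521 show the core loop shape of __get_user_pages(): each pass covers some number of pages, the increment is clamped so the last pass never overshoots the request, and nr_pages counts down to zero. A hedged sketch of just that loop, with a made-up fixed step standing in for however many pages one VMA walk really yields:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_pages = 10;        /* pages still to pin */
        unsigned long pinned = 0;

        do {
            unsigned long page_increm = 4;  /* hypothetical per-pass chunk */
            if (page_increm > nr_pages)
                page_increm = nr_pages;     /* clamp the final pass */
            pinned += page_increm;
            nr_pages -= page_increm;
            printf("pinned %lu, %lu left\n", pinned, nr_pages);
        } while (nr_pages);                 /* steps of 4, 4, then the clamped 2 */
        return 0;
    }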
hugetlb_cgroup.c
   165  int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,  [argument]
   171  unsigned long csize = nr_pages * PAGE_SIZE;
   198  void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,  [argument]
   212  void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,  [argument]
   216  unsigned long csize = nr_pages * PAGE_SIZE;
   229  void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,  [argument]
   232  unsigned long csize = nr_pages * PAGE_SIZE;
process_vm_access.c
    87  unsigned long nr_pages;  [local]
    95  nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
    97  while (!rc && nr_pages && iov_iter_count(iter)) {
    98  int pages = min(nr_pages, max_pages_per_loop);
   119  nr_pages -= pages;
   155  unsigned long nr_pages = 0;  [local]
   171  nr_pages = max(nr_pages, nr_pages_iov);
   175  if (nr_pages == 0)
   178  if (nr_pages > PVM_MAX_PP_ARRAY_COUN
   [all...]
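Line 95 is the standard page-count formula for a byte range [addr, addr + len): the page index of the last byte, minus the page index of the first byte, plus one. A quick check of the formula, assuming 4 KiB pages (the len == 0 case, where the formula would underflow, is rejected earlier in the kernel):

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL        /* assumption: 4 KiB pages */

    static unsigned long range_pages(unsigned long addr, unsigned long len)
    {
        /* last byte's page index - first byte's page index + 1 */
        return (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
    }

    int main(void)
    {
        assert(range_pages(0, 1) == 1);        /* one byte, one page        */
        assert(range_pages(4095, 2) == 2);     /* straddles a page boundary */
        assert(range_pages(4096, 4096) == 1);  /* one exactly aligned page  */
        assert(range_pages(100, 8192) == 3);   /* unaligned two-page span   */
        puts("ok");
        return 0;
    }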
readahead.c
   111  read_pages(struct address_space *mapping, struct file *filp, struct list_head *pages, unsigned nr_pages)  [argument]
   121  ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
   127  for (page_idx = 0; page_idx < nr_pages; page_idx++) {
util.c
   208  __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)  [argument]
   218  * @nr_pages: number of pages from start to pin
   221  *            Should be at least nr_pages long.
   224  * requested. If nr_pages is 0 or negative, returns 0. If no pages
   239  get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)  [argument]
   246  ret = get_user_pages(current, mm, start, nr_pages,
internal.h
   258  int nr_pages = hpage_nr_pages(page);  [local]
   261  __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
   263  __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
mlock.c
   175  unsigned int nr_pages;  [local]
   188  nr_pages = hpage_nr_pages(page);
   192  __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
   205  return nr_pages - 1;
   231  unsigned long nr_pages = (end - start) / PAGE_SIZE;  [local]
   260  return __get_user_pages(current, mm, start, nr_pages, gup_flags,
   559  int nr_pages;  [local]
   592  nr_pages = (end - start) >> PAGE_SHIFT;
   594  nr_pages = -nr_pages;
   [all...]
page_cgroup.c
    49  unsigned long nr_pages;  [local]
    51  nr_pages = NODE_DATA(nid)->node_spanned_pages;
    52  if (!nr_pages)
    55  table_size = sizeof(struct page_cgroup) * nr_pages;
   192  online_page_cgroup(unsigned long start_pfn, unsigned long nr_pages, int nid)  [argument]
   200  end = SECTION_ALIGN_UP(start_pfn + nr_pages);
   227  offline_page_cgroup(unsigned long start_pfn, unsigned long nr_pages, int nid)  [argument]
   233  end = SECTION_ALIGN_UP(start_pfn + nr_pages);
   249  mn->nr_pages, mn->status_change_nid);
   253  mn->nr_pages, m
   [all...]
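Two sizing idioms appear here: line 55 allocates one struct page_cgroup per page of the node, and the hotplug paths widen a pfn range up to a full memory section with SECTION_ALIGN_UP. A sketch of the align-up arithmetic for power-of-two section sizes, assuming 32768 pages per section (128 MiB sections with 4 KiB pages, as on x86-64):

    #include <assert.h>

    #define PAGES_PER_SECTION 32768UL   /* assumption: 128 MiB / 4 KiB */

    /* Round a pfn up to the next section boundary (sections are a power of two). */
    static unsigned long section_align_up(unsigned long pfn)
    {
        return (pfn + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1);
    }

    int main(void)
    {
        assert(section_align_up(1) == PAGES_PER_SECTION);
        assert(section_align_up(PAGES_PER_SECTION) == PAGES_PER_SECTION);
        assert(section_align_up(PAGES_PER_SECTION + 1) == 2 * PAGES_PER_SECTION);
        return 0;
    }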
sparse.c
   198  unsigned long nr_pages = 0;  [local]
   206  nr_pages += PAGES_PER_SECTION;
   209  return nr_pages * sizeof(struct page);
   659  unsigned long magic, nr_pages;  [local]
   662  nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
   665  for (i = 0; i < nr_pages; i++, page++) {
   744  static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)  [argument]
   759  static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)  [argument]
swap.c
   367  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  1068  pagevec_lookup_entries(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages, pgoff_t *indices)  [argument]
  1073  pvec->nr = find_get_entries(mapping, start, nr_pages,
  1104  * @nr_pages: The maximum number of pages
  1106  * pagevec_lookup() will search for and return a group of up to @nr_pages pages
  1115  pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages)  [argument]
  1118  pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
  1123  pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, int tag, unsigned nr_pages)  [argument]
  1127  nr_pages, pvec->pages);
filemap.c
   337  int nr_pages;  [local]
   345  (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
   350  for (i = 0; i < nr_pages; i++) {
  1212  * @nr_pages: The maximum number of pages
  1216  * @nr_pages pages in the mapping. The pages are placed at @pages.
  1224  find_get_pages(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages)  [argument]
  1231  if (unlikely(!nr_pages))
  1271  if (++ret == nr_pages)
  1283  * @nr_pages: The maximum number of pages
  1291  find_get_pages_contig(struct address_space *mapping, pgoff_t index, unsigned int nr_pages, struct page **pages)  [argument]
  1367  find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages)  [argument]
  [all...]
memory-failure.c
  1034  int nr_pages = 1 << compound_order(hpage);  [local]
  1035  for (i = 0; i < nr_pages; i++)
  1042  int nr_pages = 1 << compound_order(hpage);  [local]
  1043  for (i = 0; i < nr_pages; i++)
  1071  unsigned int nr_pages;  [local]
  1093  * so nr_pages should be 1 << compound_order. OTOH when errors are on
  1095  * measurement is done in normal page units. So nr_pages should be one
  1099  nr_pages = 1 << compound_order(hpage);
  1101  nr_pages = 1;
  1102  atomic_long_add(nr_pages,
  1399  unsigned int nr_pages;  [local]
  [all...]
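Lines 1093–1101 spell out the unit question: a huge page moves the poison counter by 1 << compound_order(hpage) base pages, a normal page by exactly one. A tiny sketch of the order-to-pages conversion, assuming 4 KiB base pages and a 2 MiB huge page (order 9 on x86-64):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB base pages */

    int main(void)
    {
        int order = 9;      /* hypothetical: a 2 MiB huge page on x86-64 */
        unsigned long nr_pages = 1UL << order;

        printf("order %d => %lu base pages => %lu KiB\n",
               order, nr_pages, nr_pages << (PAGE_SHIFT - 10));
        /* prints: order 9 => 512 base pages => 2048 KiB */
        return 0;
    }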
memory_hotplug.c
   247  unsigned long i, pfn, end_pfn, nr_pages;  [local]
   252  nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
   255  for (i = 0; i < nr_pages; i++, page++)
   261  nr_pages = zone->wait_table_hash_nr_entries
   263  nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
   266  for (i = 0; i < nr_pages; i++, page++)
   446  int nr_pages = PAGES_PER_SECTION;  [local]
   453  ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
   458  grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
   494  __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, unsigned long nr_pages)  [argument]
   712  int nr_pages = PAGES_PER_SECTION;  [local]
   756  __remove_pages(struct zone *zone, unsigned long phys_start_pfn, unsigned long nr_pages)  [argument]
   854  online_pages_range(unsigned long start_pfn, unsigned long nr_pages, void *arg)  [argument]
   888  node_states_check_changes_online(unsigned long nr_pages, struct zone *zone, struct memory_notify *arg)  [argument]
   962  online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)  [argument]
  1174  u64 nr_pages = size >> PAGE_SHIFT;  [local]
  1317  is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)  [argument]
  1467  offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, void *data)  [argument]
  1485  check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, void *data)  [argument]
  1515  can_offline_normal(struct zone *zone, unsigned long nr_pages)  [argument]
  1521  can_offline_normal(struct zone *zone, unsigned long nr_pages)  [argument]
  1577  node_states_check_changes_offline(unsigned long nr_pages, struct zone *zone, struct memory_notify *arg)  [argument]
  1670  unsigned long pfn, nr_pages, expire;  [local]
  1810  offline_pages(unsigned long start_pfn, unsigned long nr_pages)  [argument]
  [all...]
migrate.c
   473  __copy_gigantic_page(struct page *dst, struct page *src, int nr_pages)  [argument]
   480  for (i = 0; i < nr_pages; ) {
   493  int nr_pages;  [local]
   498  nr_pages = pages_per_huge_page(h);
   500  if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
   501  __copy_gigantic_page(dst, src, nr_pages);
   507  nr_pages = hpage_nr_pages(src);
   510  for (i = 0; i < nr_pages; i++) {
  1300  do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, unsigned long nr_pages, const void __user * __user *pages, const int __user *nodes, int __user *status, int flags)  [argument]
  1325  chunk_start < nr_pages;
  1386  do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, const void __user **pages, int *status)  [argument]
  1429  do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, const void __user * __user *pages, int __user *status)  [argument]
  1626  numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)  [argument]
  [all...]
ksm.c
  2053  mn->start_pfn + mn->nr_pages);
  2118  unsigned long nr_pages;  [local]
  2120  err = kstrtoul(buf, 10, &nr_pages);
  2121  if (err || nr_pages > UINT_MAX)
  2124  ksm_thread_pages_to_scan = nr_pages;
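Lines 2118–2124 are the common sysfs store pattern: parse the user buffer into an unsigned long, reject anything that does not fit the unsigned int the knob is stored in, and only then commit. A userspace sketch of the same parse-validate-commit shape, with strtoul standing in for kstrtoul:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned int pages_to_scan = 100;    /* hypothetical setting */

    static int store_pages_to_scan(const char *buf)
    {
        char *end;
        unsigned long nr_pages;

        errno = 0;
        nr_pages = strtoul(buf, &end, 10);
        if (errno || end == buf || nr_pages > UINT_MAX)
            return -EINVAL;             /* parse error or out of range */

        pages_to_scan = nr_pages;       /* commit only after validation */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", store_pages_to_scan("4100"));    /* 0: accepted */
        printf("pages_to_scan=%u\n", pages_to_scan);    /* 4100 */
        printf("%d\n", store_pages_to_scan("99999999999999999999")); /* -EINVAL */
        return 0;
    }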
memory.c
  2841  unsigned long start_addr, nr_pages, mask;  [local]
  2846  nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT;
  2847  mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
  2861  pgoff + nr_pages - 1);
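These lines compute the fault-around window: fault_around_bytes shifts down to a page count, and line 2847 turns that into an address mask so a faulting address can be rounded down to the start of its window. A worked sketch, assuming 4 KiB pages and a 64 KiB window:

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long fault_around_bytes = 65536;   /* hypothetical setting */
        unsigned long nr_pages = fault_around_bytes >> PAGE_SHIFT;  /* 16 */
        /* clear the low bits that address bytes within one window */
        unsigned long mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
        unsigned long addr = 0x7f1234567abcUL;

        printf("fault at %#lx, window starts at %#lx\n", addr, addr & mask);
        /* prints: fault at 0x7f1234567abc, window starts at 0x7f1234560000 */
        return 0;
    }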
mmap.c
   589  unsigned long nr_pages = 0;  [local]
   597  nr_pages = (min(end, vma->vm_end) -
   608  nr_pages += overlap_len >> PAGE_SHIFT;
   611  return nr_pages;
  1552  unsigned long nr_pages;  [local]
  1561  nr_pages = count_vma_pages_range(mm, addr, addr + len);
  1563  if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
nommu.c
   149  __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int foll_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)  [argument]
   166  for (i = 0; i < nr_pages; i++) {
   199  get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)  [argument]
   211  return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
page-writeback.c
   185  unsigned long nr_pages;  [local]
   187  nr_pages = zone_page_state(zone, NR_FREE_PAGES);
   188  nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
   190  nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
   191  nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
   193  return nr_pages;
  1675  int nr_pages = global_page_state(NR_FILE_DIRTY) +  [local]
  1683  bdi_start_writeback(&q->backing_dev_info, nr_pages,
  1848  int nr_pages;  [local]
  [all...]
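Line 188 is the unsigned-safe way to subtract a reserve: nr_pages -= min(nr_pages, reserve) saturates at zero instead of wrapping around when the reserve exceeds the free count. The idiom in isolation:

    #include <assert.h>

    /* Saturating subtraction for unsigned counters: a - min(a, b). */
    static unsigned long sub_sat(unsigned long a, unsigned long b)
    {
        return a - (a < b ? a : b);
    }

    int main(void)
    {
        assert(sub_sat(100, 30) == 70);
        assert(sub_sat(30, 100) == 0);  /* plain '30 - 100' would wrap huge */
        return 0;
    }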
swapfile.c
   134  nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
   145  nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
   161  discard_swap_cluster(struct swap_info_struct *si, pgoff_t start_page, pgoff_t nr_pages)  [argument]
   167  while (nr_pages) {
   171  start_page < se->start_page + se->nr_pages) {
   174  sector_t nr_blocks = se->nr_pages - offset;
   176  if (nr_blocks > nr_pages)
   177  nr_blocks = nr_pages;
   179  nr_pages -= nr_blocks;
  1597  offset < (se->start_page + se->nr_pages)) {
  1647  add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block)  [argument]
  [all...]
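swapfile.c converts swap pages to 512-byte disk sectors with a << (PAGE_SHIFT - 9) shift: one page spans 2^(PAGE_SHIFT - 9) sectors, i.e. 8 sectors per 4 KiB page. A one-off check under that page-size assumption:

    #include <stdio.h>

    #define PAGE_SHIFT   12     /* assumption: 4 KiB pages */
    #define SECTOR_SHIFT 9      /* 512-byte sectors */

    int main(void)
    {
        unsigned long nr_pages = 1024;  /* hypothetical swap extent */
        unsigned long nr_blocks = nr_pages << (PAGE_SHIFT - SECTOR_SHIFT);

        printf("%lu pages = %lu sectors = %lu KiB\n",
               nr_pages, nr_blocks, nr_blocks / 2);
        /* prints: 1024 pages = 8192 sectors = 4096 KiB */
        return 0;
    }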
vmalloc.c
  1456  for (i = 0; i < area->nr_pages; i++) {
  1564  unsigned int nr_pages, array_size, i;  [local]
  1568  nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
  1569  array_size = (nr_pages * sizeof(struct page *));
  1571  area->nr_pages = nr_pages;
  1587  for (i = 0; i < area->nr_pages; i++) {
  1597  area->nr_pages = i;
  1612  (area->nr_pages*PAGE_SIZE), area->size);
  2585  for (nr = 0; nr < v->nr_pages; n
  [all...]
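The vmalloc.c lines show a partial-failure protocol: pages are allocated one at a time into an array, and when allocation i fails, line 1597 records area->nr_pages = i so the unwind path frees exactly the pages that were obtained. A hedged userspace sketch of that record-then-unwind pattern, with malloc standing in for alloc_page():

    #include <stdio.h>
    #include <stdlib.h>

    struct area {
        unsigned int nr_pages;  /* how many slots are actually populated */
        void **pages;
    };

    static int populate(struct area *area, unsigned int want)
    {
        unsigned int i;

        area->pages = calloc(want, sizeof(*area->pages));
        if (!area->pages)
            return -1;

        for (i = 0; i < want; i++) {
            area->pages[i] = malloc(4096);  /* stand-in for alloc_page() */
            if (!area->pages[i]) {
                area->nr_pages = i;         /* record the shortfall ...      */
                return -1;                  /* ... so cleanup frees only i   */
            }
        }
        area->nr_pages = want;
        return 0;
    }

    static void release(struct area *area)
    {
        for (unsigned int i = 0; i < area->nr_pages; i++)
            free(area->pages[i]);
        free(area->pages);
    }

    int main(void)
    {
        struct area a = { 0 };

        if (populate(&a, 8))
            fprintf(stderr, "partial failure after %u pages\n", a.nr_pages);
        release(&a);    /* safe in both the success and the failure case */
        return 0;
    }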
shmem.c
  1635  unsigned int loff, nr_pages, req_pages;  [local]
  1665  nr_pages = min(req_pages, spd.nr_pages_max);
  1667  spd.nr_pages = find_get_pages_contig(mapping, index,
  1668  nr_pages, spd.pages);
  1669  index += spd.nr_pages;
  1672  while (spd.nr_pages < nr_pages) {
  1677  spd.pages[spd.nr_pages++] = page;
  1682  nr_pages = spd.nr_pages;
  [all...]