/mm/

kmemcheck.c
     11  int pages;                                      (local)
     14  pages = 1 << order;
     28  for(i = 0; i < pages; ++i)
     36  kmemcheck_hide_pages(page, pages);
     42  int pages;                                      (local)
     48  pages = 1 << order;
     50  kmemcheck_show_pages(page, pages);
     54  for(i = 0; i < pages; ++i)
    103  int pages;                                      (local)
    108  pages [all...]

percpu-km.c
     52  struct page *pages;                             (local)
     59  pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
     60  if (!pages) {
     66  pcpu_set_page_chunk(nth_page(pages, i), chunk);
     68  chunk->data = pages;
     69  chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
    106  printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n",

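percpu-km allocates each chunk as one contiguous high-order allocation, so order_base_2() rounds nr_pages up to a power of two; the rounding is where the "wasting %zu pages per chunk" warning at line 106 comes from. A minimal standalone sketch of that arithmetic (not kernel code; order_base_2 reimplemented here for illustration):

/* Sketch: rounding a chunk's page count up to a power-of-two
 * allocation order produces the per-chunk waste percpu-km warns about.
 * order_base_2(n) is ceil(log2(n)). */
#include <stdio.h>

static unsigned order_base_2(unsigned long n)
{
        unsigned order = 0;
        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned long nr_pages = 12;                 /* pages a chunk needs */
        unsigned order = order_base_2(nr_pages);     /* -> 4 */
        unsigned long allocated = 1UL << order;      /* -> 16 pages */

        printf("need %lu, alloc 2^%u = %lu, wasting %lu pages per chunk\n",
               nr_pages, order, allocated, allocated - nr_pages);
        return 0;
}
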
mincore.c
     34  * Huge pages are always in RAM for now, but
    225  static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)   (argument)
    234  end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
    247  * mincore() returns the memory residency status of the pages in the
    255  * contain stale information. Only locked pages are guaranteed to
    264  * specify one or more pages which are not currently
    272  unsigned long pages;                            (local)
    284  pages = len >> PAGE_SHIFT;
    285  pages += (len & ~PAGE_MASK) != 0;
    287  if (!access_ok(VERIFY_WRITE, vec, pages)) [all...]

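Lines 284-285 round a byte length up to whole pages, one status byte per page; the same rounding governs how large the vec argument to mincore(2) must be from user space. A hedged usage sketch:

/* User-space sketch of mincore(2): one status byte per page of the
 * range, so an 8-page mapping needs an 8-byte vector. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        size_t len = 8 * psz;
        unsigned char vec[8];

        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;

        buf[0] = 1;     /* fault in the first page only */

        if (mincore(buf, len, vec) == 0)
                for (int i = 0; i < 8; i++)
                        printf("page %d: %s\n", i,
                               (vec[i] & 1) ? "resident" : "not resident");
        munmap(buf, len);
        return 0;
}
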
percpu-vm.c
     23  * pcpu_get_pages - get temp pages array
     31  * Pointer to temp pages array on success.
     35  static struct page **pages;                     (local)
     36  size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
     40  if (!pages)
     41  pages = pcpu_mem_zalloc(pages_size);
     42  return pages;
     46  * pcpu_free_pages - free pages which were allocated for @chunk
     47  * @chunk: chunk pages were allocated for
     48  * @pages
     55  pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)   (argument)
     82  pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)   (argument)
    151  pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)   (argument)
    191  __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages)   (argument)
    212  pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)   (argument)
    276  struct page **pages;                            (local)
    309  struct page **pages;                            (local)  [all...]

workingset.c
     19  * Per zone, two clock lists are maintained for file pages: the
     20  * inactive and the active list. Freshly faulted pages start out at
     21  * the head of the inactive list and page reclaim scans pages from the
     24  * whereas active pages are demoted to the inactive list when the
     38  * A workload is thrashing when its pages are frequently used but they
     42  * In cases where the average access distance between thrashing pages
     50  * active pages - which may be used more, hopefully less frequently:
     59  * of pages. But a reasonable approximation can be made to measure
     60  * thrashing on the inactive list, after which refaulting pages can be
     61  * activated optimistically to compete with the existing active pages
    274  unsigned long pages;                            (local)  [all...]

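The comment block describes refault-distance tracking: a page evicted from the inactive list is activated on refault when its eviction-to-refault distance fits within the active list. A minimal standalone sketch of that rule; the names (eviction_counter, stamp, active_file_pages) are hypothetical, not the kernel's actual data structures:

/* Hedged sketch of the activation rule described above.  On eviction
 * we stamp the page's slot with the current eviction counter; on
 * refault, the counter delta approximates how many pages were evicted
 * in between (the refault distance). */
static unsigned long eviction_counter;

unsigned long record_eviction(void)
{
        return ++eviction_counter;      /* stamp kept in the page's slot */
}

int should_activate_on_refault(unsigned long stamp,
                               unsigned long active_file_pages)
{
        unsigned long refault_distance = eviction_counter - stamp;

        /* The page's reuse distance would have fit in memory if the
         * inactive list were extended by the active list's size. */
        return refault_distance <= active_file_pages;
}
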
zpool.c
    272  * @pages: The number of pages to shrink the pool.
    273  * @reclaimed: The number of pages successfully evicted.
    279  * parameter will be set to the number of pages reclaimed,
    280  * which may be more than the number of pages requested.
    286  int zpool_shrink(struct zpool *zpool, unsigned int pages,   (argument)
    289  return zpool->driver->shrink(zpool->pool, pages, reclaimed);

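A hedged kernel-style sketch of calling zpool_shrink() against a pool. The zpool_create_pool() arguments follow the API of the era this listing comes from and are an assumption; check your kernel's include/linux/zpool.h:

/* Hedged sketch: ask a zpool to evict at least 16 pages. */
#include <linux/zpool.h>

static void shrink_example(void)
{
        struct zpool *pool;
        unsigned int reclaimed = 0;

        pool = zpool_create_pool("zbud", GFP_KERNEL, NULL);  /* assumed signature */
        if (!pool)
                return;

        if (zpool_shrink(pool, 16, &reclaimed) == 0)
                pr_info("evicted %u pages\n", reclaimed);

        zpool_destroy_pool(pool);
}
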
cma.c
     41  unsigned int order_per_bit; /* Order of pages represented by one bit */
     71  cma_bitmap_pages_to_bits(struct cma *cma, unsigned long pages)   (argument)
     72  unsigned long pages)
     74  return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
    150  * @order_per_bit: Order of pages represented by one bit on bitmap.
    200  * @order_per_bit: Order of pages represented by one bit on bitmap.
    284  * All pages in the reserved area must come from the same zone.
    321  * cma_alloc() - allocate pages from contiguous area
    323  * @count: Requested number of pages.
    324  * @align: Requested alignment of pages (in PAGE_SIZE order).
    389  * cma_release() - release allocated pages
    398  cma_release(struct cma *cma, struct page *pages, int count)   (argument)  [all...]

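cma_bitmap_pages_to_bits() (line 74) rounds a page count up to a multiple of 2^order_per_bit and converts it to bitmap bits. A standalone sketch of that math with worked numbers (ALIGN reimplemented here for illustration):

/* Sketch of the line-74 math: with order_per_bit = 2 each bitmap bit
 * covers 4 pages, so 5 pages round up to 8 and need 2 bits. */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned long pages_to_bits(unsigned long pages, unsigned order_per_bit)
{
        return ALIGN(pages, 1UL << order_per_bit) >> order_per_bit;
}

int main(void)
{
        printf("%lu\n", pages_to_bits(5, 2));   /* prints 2 */
        printf("%lu\n", pages_to_bits(8, 2));   /* prints 2 */
        printf("%lu\n", pages_to_bits(9, 2));   /* prints 3 */
        return 0;
}
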
dmapool.c
     14  * new pages, then splits them up into blocks of the required size.
     19  * allocated pages. Each page in the page_list is split into blocks of at
     85  unsigned pages = 0;                             (local)
     90  pages++;
     98  pages * (pool->allocation / pool->size),
     99  pool->size, pages);
    466  * Better have a few empty pages hang around.

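The comments describe the classic DMA-pool pattern: allocate whole pages and carve them into fixed-size coherent blocks. A hedged kernel-style sketch of the dma_pool API this file implements; dev is assumed to be a valid struct device (e.g. &pdev->dev):

/* Sketch: create a pool of 64-byte, 8-byte-aligned coherent blocks,
 * grab one, release everything. */
#include <linux/dmapool.h>

static int dmapool_example(struct device *dev)
{
        struct dma_pool *pool;
        void *vaddr;
        dma_addr_t handle;

        pool = dma_pool_create("example-pool", dev, 64, 8, 0);
        if (!pool)
                return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
        if (vaddr)
                dma_pool_free(pool, vaddr, handle);

        dma_pool_destroy(pool);
        return 0;
}
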
frontswap.c
     34  * can unilaterally "reclaim" any pages in use with no data loss, thus
    348  unsigned long pages = 0, pages_to_unuse = 0;    (local)
    354  pages = pages_to_unuse = total_pages_to_unuse;
    356  pages = si_frontswap_pages;
    359  /* ensure there is enough RAM to fetch pages from frontswap */
    360  if (security_vm_enough_memory_mm(current->mm, pages)) {
    364  vm_unacct_memory(pages);
    375  * Used to check if it's necessary and feasible to unuse pages.
    376  * Return 1 when nothing to do, 0 when need to shrink pages,
    398  * Frontswap, like a true swap device, may unnecessarily retain pages [all...]

gup.c
     24  * has touched so far, we don't want to allocate unnecessary pages or
    106  lru_add_drain(); /* push cached pages to LRU */
    185  * Refcount on tail pages are not well-defined and
    187  * return when trying to follow tail pages.
    231  /* user gate pages are read-only */
    348  * Anon pages in shared mappings are surprising: now
    370  * __get_user_pages() - pin user pages in memory
    374  * @nr_pages: number of pages from start to pin
    376  * @pages: array that receives pointers to the pages pinned
    425  __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking)   (argument)
    637  get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas)   (argument)
    723  gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    782  gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    789  gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    836  gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    878  gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    912  gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)   (argument)
    940  __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)   (argument)
   1000  get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)   (argument)  [all...]

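The line-637 signature is the old eight-argument get_user_pages(), taking an explicit task/mm pair plus write/force ints. A hedged driver-style sketch of pinning one user page with that API:

/* Hedged sketch against the 8-argument get_user_pages() shown at
 * line 637 (the era of this listing, where mmap_sem guards the mm). */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pagemap.h>

static int pin_one_user_page(unsigned long uaddr)
{
        struct page *page;
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                             1,         /* nr_pages */
                             1,         /* write */
                             0,         /* force */
                             &page, NULL);
        up_read(&current->mm->mmap_sem);

        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* ... access the page ... */
        set_page_dirty_lock(page);
        put_page(page);                 /* drop the pin taken above */
        return 0;
}
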
highmem.c
     50  * addresses where physical memory pages are mapped by kmap.
    116  unsigned int pages = 0;                         (local)
    119  pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
    122  pages += zone_page_state(
    127  return pages;
    284  * For highmem pages, we can't trust "virtual" until

mprotect.c
     67  unsigned long pages = 0;                        (local)
     86  * Avoid taking write faults for pages we
    107  pages++;
    123  pages++;
    130  return pages;
    140  unsigned long pages = 0;                        (local)
    167  pages += HPAGE_PMD_NR;
    179  pages += this_pages;
    187  return pages;
    196  unsigned long pages                             (local)
    218  unsigned long pages = 0;                        (local)
    244  unsigned long pages;                            (local)  [all...]

mremap.c
    260  * Advise KSM to break any KSM pages in the area to be moved:
    263  * pages recently unmapped. But leave vma->vm_flags as it was,
    516  * the unnecessary pages..
    541  int pages = (new_len - old_len) >> PAGE_SHIFT;  (local)
    549  vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
    551  mm->locked_vm += pages;

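Line 541 converts the grown byte span into pages for the locked_vm accounting; from user space that growth comes from mremap(2). A hedged usage sketch:

/* User-space sketch of mremap(2): grow an anonymous mapping from 1 to
 * 4 pages; the kernel accounts the (new_len - old_len) >> PAGE_SHIFT
 * delta seen at line 541. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;

        void *q = mremap(p, psz, 4 * psz, MREMAP_MAYMOVE);
        if (q == MAP_FAILED)
                return 1;

        printf("grew by %ld pages\n", (4L * psz - psz) / psz);  /* 3 */
        munmap(q, 4 * psz);
        return 0;
}
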
nobootmem.c
     62  * free_bootmem_late - free bootmem pages directly to page allocator
    170  * free_all_bootmem - release free pages to the buddy allocator
    172  * Returns the number of pages actually released.
    176  unsigned long pages;                            (local)
    185  pages = free_low_memory_core_early();
    186  totalram_pages += pages;
    188  return pages;
    197  * Partial pages will be considered reserved and left as they are.
    212  * Partial pages will be considered reserved and left as they are.

process_vm_access.c
     25  * process_vm_rw_pages - read/write pages from task specified
     26  * @pages: array of pointers to pages we want to copy
     33  static int process_vm_rw_pages(struct page **pages,   (argument)
     41  struct page *page = *pages++;
     62  /* Maximum number of pages kmalloc'd to hold struct page's during copy */
     66  * process_vm_rw_single_vec - read/write pages from task specified
     70  * @process_pages: struct pages area that can store at least
     90  / sizeof(struct pages *);
     98  int pages                                       (local)  [all...]

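This file backs the process_vm_readv(2)/process_vm_writev(2) syscalls. A hedged user-space sketch reading another process's memory; pid and remote_addr are placeholders the caller must supply:

/* Sketch of process_vm_readv(2): copy 64 bytes from a remote address
 * in process `pid` into a local buffer. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

int read_remote(pid_t pid, void *remote_addr)
{
        char buf[64];
        struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
        struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };

        ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
        if (n < 0) {
                perror("process_vm_readv");
                return -1;
        }
        printf("copied %zd bytes\n", n);
        return 0;
}
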
readahead.c
     40  * before calling, such as the NFS fs marking pages that are cached locally
     59  * release a list of pages, invalidating them first if need be
     61  read_cache_pages_invalidate_pages(struct address_space *mapping, struct list_head *pages)   (argument)
     62  struct list_head *pages)
     66  while (!list_empty(pages)) {
     67  victim = list_to_page(pages);
     74  * read_cache_pages - populate an address space with some pages & start reads against them
     76  * @pages: The address of a list_head which contains the target pages. These
     77  * pages have their ->index populated and are otherwise uninitialised.
     83  int read_cache_pages(struct address_space *mapping, struct list_head *pages,   (argument)
    111  read_pages(struct address_space *mapping, struct file *filp, struct list_head *pages, unsigned nr_pages)   (argument)  [all...]

swap_state.c
     73  printk("%lu pages in swap cache\n", total_swapcache_pages());
    139  * This must be called only on pages that have
    214  * This must be called only on pages that have
    262  * Passed an array of pages, drop them all from swapcache and then release
    265  void free_pages_and_swap_cache(struct page **pages, int nr)   (argument)
    267  struct page **pagep = pages;
    397  unsigned int pages, max_pages, last_ra;         (local)
    409  pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
    410  if (pages == 2) {
    417  pages [all...]

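Line 409 seeds the swapin-readahead window from the number of recent readahead hits plus two. A hedged standalone sketch of that style of adaptive window; the constants and rounding are illustrative, not the kernel's exact policy:

/* Sketch: the window grows with recent readahead hits, is rounded up
 * to a power of two, and is clamped to a maximum. */
#include <stdio.h>

static unsigned int swapin_hits;        /* stands in for the atomic counter */

static unsigned int next_window(unsigned int max_pages)
{
        unsigned int pages = swapin_hits + 2;   /* consume the hit count */
        swapin_hits = 0;

        /* round up to a power of two so reads stay nicely aligned */
        unsigned int w = 2;
        while (w < pages)
                w <<= 1;

        return w > max_pages ? max_pages : w;
}

int main(void)
{
        swapin_hits = 5;
        printf("window = %u pages\n", next_window(8));  /* 5+2=7 -> 8 */
        return 0;
}
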
util.c
    208  __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)   (argument)
    209  int nr_pages, int write, struct page **pages)
    216  * get_user_pages_fast() - pin user pages in memory
    218  * @nr_pages: number of pages from start to pin
    219  * @write: whether pages will be written to
    220  * @pages: array that receives pointers to the pages pinned.
    223  * Returns number of pages pinned. This may be fewer than the number
    224  * requested. If nr_pages is 0 or negative, returns 0. If no pages
    235  * pages have to be faulted in, it may turn out to be slightly slower so
    239  get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages)   (argument)
    240  int nr_pages, int write, struct page **pages)  [all...]

iov_iter.c
    446  get_pages_iovec(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start)   (argument)
    447  struct page **pages, size_t maxsize, unsigned maxpages,
    468  res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
    474  get_pages_alloc_iovec(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start)   (argument)
    475  struct page ***pages, size_t maxsize,
    507  *pages = p;
    776  get_pages_bvec(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start)   (argument)
    777  struct page **pages, size_t maxsize, unsigned maxpages,
    789  get_page(*pages = bvec->bv_page);
    794  get_pages_alloc_bvec(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start)   (argument)
    795  struct page ***pages, size_t maxsize,
    806  *pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
    807  if (!*pages)
    810  get_page(**pages
    929  iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start)   (argument)
    940  iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start)   (argument)  [all...]

zbud.c
      8  * zbud is a special purpose allocator for storing compressed pages. Contrary
     10  * allocator that "buddies" two compressed pages together in a single memory
     17  * zbud works by storing compressed pages, or "zpages", together in pairs in a
     26  * to zbud pages can not be less than 1. This ensures that zbud can never "do
     27  * harm" by using more pages to store zpages than the uncompressed zpages would
     30  * zbud pages are divided into "chunks". The size of the chunks is fixed at
     31  * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages
     32  * into chunks allows organizing unbuddied zbud pages into a manageable number
     79  * @unbuddied: array of lists tracking zbud pages that only contain one buddy;
     82  * @buddied: list tracking the zbud pages that
    153  zbud_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed)   (argument)  [all...]

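A hedged arithmetic sketch of the chunk accounting described at lines 30-32: a zbud page is split into 2^NCHUNKS_ORDER chunks and each zpage is charged the whole chunks it spans. NCHUNKS_ORDER = 6 matches the in-tree default but is an assumption here:

/* Sketch of zbud's chunk accounting: a 4096-byte page has 64 chunks
 * of 64 bytes each. */
#include <stdio.h>

#define PAGE_SIZE       4096
#define NCHUNKS_ORDER   6
#define NCHUNKS         (1 << NCHUNKS_ORDER)            /* 64 chunks */
#define CHUNK_SIZE      (PAGE_SIZE >> NCHUNKS_ORDER)    /* 64 bytes  */

static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
}

int main(void)
{
        /* Two zpages buddy up in one zbud page iff their chunk counts
         * sum to <= NCHUNKS, so the ratio can never drop below 1. */
        printf("1000 bytes -> %d chunks\n", size_to_chunks(1000));  /* 16 */
        printf("2500 bytes -> %d chunks\n", size_to_chunks(2500));  /* 40 */
        printf("fit together: %s\n",
               size_to_chunks(1000) + size_to_chunks(2500) <= NCHUNKS
               ? "yes" : "no");                                     /* yes */
        return 0;
}
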
bootmem.c
     57  static unsigned long __init bootmap_bytes(unsigned long pages)   (argument)
     59  unsigned long bytes = DIV_ROUND_UP(pages, 8);
     65  * bootmem_bootmap_pages - calculate bitmap size in pages
     66  * @pages: number of pages the bitmap has to represent
     68  unsigned long __init bootmem_bootmap_pages(unsigned long pages)   (argument)
     70  unsigned long bytes = bootmap_bytes(pages);
    107  * Initially all pages are reserved - setup_arch() has to
    137  * @pages: number of available physical pages
    141  init_bootmem(unsigned long start, unsigned long pages)   (argument)
    175  unsigned long *map, start, end, pages, count = 0;   (local)  [all...]

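The bootmem bitmap needs one bit per page, so bootmap_bytes() is ceil(pages/8), and bootmem_bootmap_pages() rounds that up to whole pages. A standalone sketch with worked numbers (the real bootmap_bytes() additionally aligns to sizeof(long), omitted here):

/* Sketch of bootmem bitmap sizing: 1 GiB of 4 KiB pages = 262144
 * pages -> 32768 bitmap bytes -> 8 bitmap pages. */
#include <stdio.h>

#define PAGE_SIZE               4096UL
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned long bootmap_bytes(unsigned long pages)
{
        return DIV_ROUND_UP(pages, 8);          /* one bit per page */
}

static unsigned long bootmap_pages(unsigned long pages)
{
        return DIV_ROUND_UP(bootmap_bytes(pages), PAGE_SIZE);
}

int main(void)
{
        unsigned long pages = (1UL << 30) / PAGE_SIZE;  /* 262144 */

        printf("%lu pages need %lu bitmap bytes (%lu bitmap pages)\n",
               pages, bootmap_bytes(pages), bootmap_pages(pages));
        return 0;
}
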
swap.c
     40  /* How many pages do we try to swap or page in/out together? */
     48  * This path almost never happens for VM activity - pages are normally
    340  * put_pages_list() - release a list of pages
    341  * @pages: list of pages threaded on page->lru
    343  * Release a list of pages which are strung together on page.lru. Currently
    346  void put_pages_list(struct list_head *pages)    (argument)
    348  while (!list_empty(pages)) {
    351  victim = list_entry(pages->prev, struct page, lru);
    359  * get_kernel_pages() - pin kernel pages in memory
    371  get_kernel_pages(const struct kvec *kiov, int nr_segs, int write, struct page **pages)   (argument)
    399  get_kernel_page(unsigned long start, int write, struct page **pages)   (argument)
    899  release_pages(struct page **pages, int nr, bool cold)   (argument)  [all...]

filemap.c
    222  * This must be called only on pages that have been verified to be in the page
    258  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
    264  * Start writeback against all of a mapping's dirty pages that lie
    314  * purposes - I/O may not be started against all dirty pages.
    328  * Walk the list of under-writeback pages of the given address space
    351  struct page *page = pvec.pages[i];
    374  * filemap_fdatawait - wait for all under-writeback pages to complete
    377  * Walk the list of under-writeback pages of the given address space
    398  * Even if the above returned error, the pages may be
    417  * @mapping: the address_space for the pages
   1224  find_get_pages(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages)   (argument)
   1291  find_get_pages_contig(struct address_space *mapping, pgoff_t index, unsigned int nr_pages, struct page **pages)   (argument)
   1367  find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages)   (argument)  [all...]

memblock.c
   1288  * __memblock_free_late - free bootmem block pages directly to buddy allocator
   1324  unsigned long pages = 0;                        (local)
   1333  pages += end_pfn - start_pfn;
   1336  return PFN_PHYS(pages);

migrate.c
     49  * migrate_prep() needs to be called before we start compiling a list of pages
     56  * Clear the LRU lists so pages can be isolated.
     57  * Note that pages may be moved off the LRU after we have
     58  * drained them. Those pages will fail to migrate like other
     59  * pages that may be busy.
     75  * Put previously isolated pages back onto the appropriate lists
    336  * 1 for anonymous pages without a mapping
    337  * 2 for pages with a mapping
    338  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
    410  * Note that anonymous pages are
   1300  do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, unsigned long nr_pages, const void __user * __user *pages, const int __user *nodes, int __user *status, int flags)   (argument)
   1386  do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, const void __user **pages, int *status)   (argument)
   1429  do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, const void __user * __user *pages, int __user *status)   (argument)  [all...]

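do_pages_move() and do_pages_stat() implement the move_pages(2) syscall. A hedged user-space sketch that migrates one page of the calling process to NUMA node 0 (requires libnuma's <numaif.h>; link with -lnuma):

/* Sketch of move_pages(2): with a non-NULL nodes array the pages are
 * migrated; with nodes == NULL the call only queries placement. */
#include <stdio.h>
#include <stdlib.h>
#include <numaif.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        char *page = aligned_alloc(psz, psz);
        if (!page)
                return 1;
        page[0] = 1;                    /* fault the page in */

        void *pages[1] = { page };
        int nodes[1]   = { 0 };         /* request node 0 */
        int status[1];

        if (move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE))
                perror("move_pages");
        else
                printf("page now on node %d\n", status[0]);

        free(page);
        return 0;
}
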