Searched refs:pages (Results 1 - 25 of 44) sorted by relevance


/mm/
kmemcheck.c
11 int pages; local
14 pages = 1 << order;
28 for(i = 0; i < pages; ++i)
36 kmemcheck_hide_pages(page, pages);
42 int pages; local
48 pages = 1 << order;
50 kmemcheck_show_pages(page, pages);
54 for(i = 0; i < pages; ++i)
103 int pages; local
108 pages
[all...]
percpu-vm.c
23 * pcpu_get_pages - get temp pages array
31 * Pointer to temp pages array on success.
35 static struct page **pages; local
36 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
40 if (!pages)
41 pages = pcpu_mem_zalloc(pages_size);
42 return pages;
46 * pcpu_free_pages - free pages which were allocated for @chunk
47 * @chunk: chunk pages were allocated for
48 * @pages
55 pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) argument
82 pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) argument
151 pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) argument
191 __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages) argument
212 pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) argument
276 struct page **pages; local
309 struct page **pages; local
[all...]
percpu-km.c
52 struct page *pages; local
59 pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
60 if (!pages) {
66 pcpu_set_page_chunk(nth_page(pages, i), chunk);
68 chunk->data = pages;
69 chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
106 printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n",
gup.c
24 * has touched so far, we don't want to allocate unnecessary pages or
106 lru_add_drain(); /* push cached pages to LRU */
185 * Refcount on tail pages are not well-defined and
187 * return when trying to follow tail pages.
231 /* user gate pages are read-only */
348 * Anon pages in shared mappings are surprising: now
370 * __get_user_pages() - pin user pages in memory
374 * @nr_pages: number of pages from start to pin
376 * @pages: array that receives pointers to the pages pinned.
425 __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) argument
637 get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) argument
723 gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
782 gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
789 gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
836 gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
878 gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
912 gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
940 __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) argument
1000 get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) argument
[all...]
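The gup.c hits above include the kernel-doc for __get_user_pages() and the signature of get_user_pages_fast(). As a hedged illustration of the usual driver pattern for pinning a user buffer and dropping the references afterwards (function and variable names here are illustrative, not from the results):

```c
/* Sketch only: pin a user buffer with get_user_pages_fast() and release it.
 * Assumes the 4-argument signature shown in the results above. */
#include <linux/mm.h>
#include <linux/slab.h>

static int pin_user_buffer(unsigned long uaddr, int nr, struct page ***pagesp)
{
	struct page **pages;
	int pinned, i;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* write = 1: the pinned pages may be written to */
	pinned = get_user_pages_fast(uaddr, nr, 1, pages);
	if (pinned < 0) {
		kfree(pages);
		return pinned;
	}

	/* May be fewer than requested; this sketch simply rolls back. */
	if (pinned < nr) {
		for (i = 0; i < pinned; i++)
			put_page(pages[i]);
		kfree(pages);
		return -EFAULT;
	}

	*pagesp = pages;
	return pinned;
}
```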
swap_state.c
73 printk("%lu pages in swap cache\n", total_swapcache_pages());
139 * This must be called only on pages that have
214 * This must be called only on pages that have
262 * Passed an array of pages, drop them all from swapcache and then release
265 void free_pages_and_swap_cache(struct page **pages, int nr) argument
267 struct page **pagep = pages;
397 unsigned int pages, max_pages, last_ra; local
409 pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
410 if (pages == 2) {
417 pages
[all...]
mprotect.c
67 unsigned long pages = 0; local
86 * Avoid taking write faults for pages we
107 pages++;
123 pages++;
130 return pages;
140 unsigned long pages = 0; local
167 pages += HPAGE_PMD_NR;
179 pages += this_pages;
187 return pages;
196 unsigned long pages local
218 unsigned long pages = 0; local
244 unsigned long pages; local
[all...]
process_vm_access.c
25 * process_vm_rw_pages - read/write pages from task specified
26 * @pages: array of pointers to pages we want to copy
33 static int process_vm_rw_pages(struct page **pages, argument
41 struct page *page = *pages++;
62 /* Maximum number of pages kmalloc'd to hold struct page's during copy */
66 * process_vm_rw_single_vec - read/write pages from task specified
70 * @process_pages: struct pages area that can store at least
90 / sizeof(struct pages *);
98 int pages local
[all...]
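process_vm_access.c implements the process_vm_readv()/process_vm_writev() syscalls that the helpers above (process_vm_rw_pages, process_vm_rw_single_vec) serve. A minimal userspace sketch of the read direction, assuming a valid target pid and remote address obtained elsewhere:

```c
/* Userspace sketch: read another process's memory via process_vm_readv().
 * "pid" and "remote_addr" are placeholders the caller must supply. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

int read_remote(pid_t pid, void *remote_addr, void *buf, size_t len)
{
	struct iovec local  = { .iov_base = buf,         .iov_len = len };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = len };
	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);

	if (n < 0) {
		perror("process_vm_readv");
		return -1;
	}
	return (int)n;	/* bytes actually copied; may be partial */
}
```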
mincore.c
34 * Huge pages are always in RAM for now, but
225 static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) argument
234 end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
247 * mincore() returns the memory residency status of the pages in the
255 * contain stale information. Only locked pages are guaranteed to
264 * specify one or more pages which are not currently
272 unsigned long pages; local
284 pages = len >> PAGE_SHIFT;
285 pages += (len & ~PAGE_MASK) != 0;
287 if (!access_ok(VERIFY_WRITE, vec, pages))
[all...]
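The mincore.c hits show how the syscall rounds a byte length up to whole pages (len >> PAGE_SHIFT, plus one if a partial page remains) and checks the user vector with access_ok(). From userspace the same rounding determines how large the result vector must be; a short sketch (names are illustrative, and addr must be page-aligned):

```c
/* Userspace sketch: query page residency of a mapping with mincore(2).
 * vec[] gets one byte per page; bit 0 set means the page is resident. */
#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int count_resident(void *addr, size_t len)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t pages = len / page + ((len % page) != 0);   /* round up */
	unsigned char *vec = malloc(pages);
	size_t i, resident = 0;

	if (!vec || mincore(addr, len, vec) != 0) {
		free(vec);
		return -1;
	}
	for (i = 0; i < pages; i++)
		resident += vec[i] & 1;
	free(vec);
	return (int)resident;
}
```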
readahead.c
40 * before calling, such as the NFS fs marking pages that are cached locally
59 * release a list of pages, invalidating them first if need be
62 struct list_head *pages)
66 while (!list_empty(pages)) {
67 victim = list_to_page(pages);
74 * read_cache_pages - populate an address space with some pages & start reads against them
76 * @pages: The address of a list_head which contains the target pages. These
77 * pages have their ->index populated and are otherwise uninitialised.
83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, argument
61 read_cache_pages_invalidate_pages(struct address_space *mapping, struct list_head *pages) argument
111 read_pages(struct address_space *mapping, struct file *filp, struct list_head *pages, unsigned nr_pages) argument
[all...]
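read_cache_pages(), documented above, takes a list of not-yet-initialised pages (only ->index populated) and starts reads against them; the usual caller is a filesystem's ->readpages() hook. A hedged sketch, assuming the conventional filler signature int (*filler)(void *, struct page *), which is truncated in the listing; "myfs_*" names are hypothetical:

```c
/* Sketch: a trivial ->readpages() that feeds the whole list to
 * read_cache_pages(), using the filesystem's own readpage as filler. */
#include <linux/fs.h>
#include <linux/pagemap.h>

int myfs_readpage(struct file *file, struct page *page);	/* defined elsewhere */

static int myfs_filler(void *data, struct page *page)
{
	return myfs_readpage((struct file *)data, page);
}

static int myfs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, myfs_filler, file);
}
```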
balloon_compaction.c
4 * Common interface for making balloon pages movable by compaction.
55 * compaction isolated pages.
64 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
91 * list is empty and there is no isolated pages, then something
92 * went out of track and some balloon pages are lost.
94 * an infinite loop while attempting to release all its pages.
97 if (unlikely(list_empty(&b_dev_info->pages) &&
128 list_add(&page->lru, &b_dev_info->pages);
137 * Avoid burning cycles with pages that are yet under __free_pages(),
147 * As balloon pages ar
[all...]
bootmem.c
57 static unsigned long __init bootmap_bytes(unsigned long pages) argument
59 unsigned long bytes = DIV_ROUND_UP(pages, 8);
65 * bootmem_bootmap_pages - calculate bitmap size in pages
66 * @pages: number of pages the bitmap has to represent
68 unsigned long __init bootmem_bootmap_pages(unsigned long pages) argument
70 unsigned long bytes = bootmap_bytes(pages);
107 * Initially all pages are reserved - setup_arch() has to
137 * @pages: number of available physical pages
141 init_bootmem(unsigned long start, unsigned long pages) argument
175 unsigned long *map, start, end, pages, count = 0; local
[all...]
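A quick worked example of the arithmetic above, assuming 4 KiB pages: describing 512 MiB of memory means 131072 pages, so the one-bit-per-page bitmap needs DIV_ROUND_UP(131072, 8) = 16384 bytes, and bootmem_bootmap_pages() reports 4 pages for the bitmap itself.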
frontswap.c
34 * can unilaterally "reclaim" any pages in use with no data loss, thus
348 unsigned long pages = 0, pages_to_unuse = 0; local
354 pages = pages_to_unuse = total_pages_to_unuse;
356 pages = si_frontswap_pages;
359 /* ensure there is enough RAM to fetch pages from frontswap */
360 if (security_vm_enough_memory_mm(current->mm, pages)) {
364 vm_unacct_memory(pages);
375 * Used to check if it's necessary and feasible to unuse pages.
376 * Return 1 when nothing to do, 0 when need to shrink pages,
398 * Frontswap, like a true swap device, may unnecessarily retain pages
[all...]
zsmalloc.c
19 * (0-order) pages, it would suffer from very high fragmentation --
23 * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
25 * pages act as a single higher-order page i.e. an object can span 0-order
26 * page boundaries. The code refers to these linked pages as a single entity
53 * page->lru: links together all component pages (except the first page)
65 * page->lru: links together first pages of various zspages.
100 * span more than 1 page which avoids complex case of mapping 2 pages simply
107 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
152 * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
162 * We do not maintain any list for completely empty or full pages
267 zs_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed) argument
761 __zs_map_object(struct mapping_area *area, struct page *pages[2], int off, int size) argument
769 __zs_unmap_object(struct mapping_area *area, struct page *pages[2], int off, int size) argument
800 __zs_map_object(struct mapping_area *area, struct page *pages[2], int off, int size) argument
828 __zs_unmap_object(struct mapping_area *area, struct page *pages[2], int off, int size) argument
1114 struct page *pages[2]; local
1168 struct page *pages[2]; local
[all...]
swap.c
40 /* How many pages do we try to swap or page in/out together? */
48 * This path almost never happens for VM activity - pages are normally
340 * put_pages_list() - release a list of pages
341 * @pages: list of pages threaded on page->lru
343 * Release a list of pages which are strung together on page.lru. Currently
346 void put_pages_list(struct list_head *pages) argument
348 while (!list_empty(pages)) {
351 victim = list_entry(pages->prev, struct page, lru);
359 * get_kernel_pages() - pin kernel pages i
371 get_kernel_pages(const struct kvec *kiov, int nr_segs, int write, struct page **pages) argument
399 get_kernel_page(unsigned long start, int write, struct page **pages) argument
899 release_pages(struct page **pages, int nr, bool cold) argument
[all...]
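put_pages_list() above drops one reference on every page strung on the list's ->lru links, and release_pages() does the same for an array. A hedged kernel-side sketch of the list variant (the allocation loop exists only so there is something to free):

```c
/* Sketch: build a throwaway list of pages and release them in one call.
 * Purely illustrative; real callers hand over pages they already own. */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

static void demo_put_pages_list(int nr)
{
	LIST_HEAD(list);
	struct page *page;
	int i;

	for (i = 0; i < nr; i++) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			break;
		list_add(&page->lru, &list);
	}

	/* Drops the reference taken by alloc_page() on each entry,
	 * freeing the pages and leaving the list empty. */
	put_pages_list(&list);
}
```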
workingset.c
19 * Per zone, two clock lists are maintained for file pages: the
20 * inactive and the active list. Freshly faulted pages start out at
21 * the head of the inactive list and page reclaim scans pages from the
24 * whereas active pages are demoted to the inactive list when the
38 * A workload is thrashing when its pages are frequently used but they
42 * In cases where the average access distance between thrashing pages
50 * active pages - which may be used more, hopefully less frequently:
59 * of pages. But a reasonable approximation can be made to measure
60 * thrashing on the inactive list, after which refaulting pages can be
61 * activated optimistically to compete with the existing active pages
274 unsigned long pages; local
[all...]
cma.c
41 unsigned int order_per_bit; /* Order of pages represented by one bit */
72 unsigned long pages)
74 return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
150 * @order_per_bit: Order of pages represented by one bit on bitmap.
200 * @order_per_bit: Order of pages represented by one bit on bitmap.
284 * All pages in the reserved area must come from the same zone.
321 * cma_alloc() - allocate pages from contiguous area
323 * @count: Requested number of pages.
324 * @align: Requested alignment of pages (in PAGE_SIZE order).
389 * cma_release() - release allocated pages
71 cma_bitmap_pages_to_bits(struct cma *cma, unsigned long pages) argument
398 cma_release(struct cma *cma, struct page *pages, int count) argument
[all...]
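The cma.c hits document cma_alloc()/cma_release(): counts are in pages and alignment is a PAGE_SIZE order. A hedged driver-side sketch; note that only cma_release()'s signature appears verbatim above, so the cma_alloc() prototype used here (struct page *cma_alloc(struct cma *, int count, unsigned int align)) is an assumption for this kernel era, and "my_cma" stands in for an area obtained via the CMA setup paths:

```c
/* Sketch: grab 16 physically contiguous pages from a CMA area, then return them. */
#include <linux/cma.h>
#include <linux/mm.h>

static int demo_cma(struct cma *my_cma)
{
	struct page *pages;

	pages = cma_alloc(my_cma, 16, 0);	/* 16 pages, no extra alignment */
	if (!pages)
		return -ENOMEM;

	/* ... use the contiguous range starting at page_to_phys(pages) ... */

	cma_release(my_cma, pages, 16);
	return 0;
}
```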
vmalloc.c
113 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
126 struct page *page = pages[*nr];
139 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
149 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
156 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
166 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
174 * will have pfns corresponding to the "pages" array.
176 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
179 pgprot_t prot, struct page **pages)
191 err = vmap_pud_range(pgd, addr, next, prot, pages,
112 vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
138 vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
155 vmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
178 vmap_page_range_noflush(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) argument
199 vmap_page_range(unsigned long start, unsigned long end, pgprot_t prot, struct page **pages) argument
1096 vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) argument
1229 map_kernel_range_noflush(unsigned long addr, unsigned long size, pgprot_t prot, struct page **pages) argument
1273 map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) argument
1532 vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) argument
1563 struct page **pages; local
[all...]
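The vmalloc.c results show the page-table walk (vmap_pte_range/vmap_pmd_range/vmap_pud_range) behind vmap() and vm_map_ram(): the pte at addr + N * PAGE_SIZE maps pages[N]. A hedged sketch of the public entry point vmap(), whose signature appears above (error unwinding is trimmed for brevity):

```c
/* Sketch: map scattered order-0 pages into one virtually contiguous
 * kernel range with vmap(); the caller later calls vunmap(addr),
 * frees each page and kfree()s the array. */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_vmap(int nr, struct page ***pagesp)
{
	struct page **pages;
	void *addr;
	int i;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < nr; i++)
		pages[i] = alloc_page(GFP_KERNEL);	/* assume success for the sketch */

	/* addr + i * PAGE_SIZE now maps pages[i], per the comment above */
	addr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);

	*pagesp = pages;
	return addr;
}
```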
highmem.c
50 * addresses where physical memory pages are mapped by kmap.
116 unsigned int pages = 0; local
119 pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
122 pages += zone_page_state(
127 return pages;
284 * For highmem pages, we can't trust "virtual" until
nobootmem.c
62 * free_bootmem_late - free bootmem pages directly to page allocator
170 * free_all_bootmem - release free pages to the buddy allocator
172 * Returns the number of pages actually released.
176 unsigned long pages; local
185 pages = free_low_memory_core_early();
186 totalram_pages += pages;
188 return pages;
197 * Partial pages will be considered reserved and left as they are.
212 * Partial pages will be considered reserved and left as they are.
huge_memory.c
484 unsigned long pages; local
486 err = kstrtoul(buf, 10, &pages);
487 if (err || !pages || pages > UINT_MAX)
490 khugepaged_pages_to_scan = pages;
948 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
990 struct page **pages; local
994 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
996 if (unlikely(!pages)) {
1002 pages[
2760 unsigned int pages = khugepaged_pages_to_scan; local
[all...]
iov_iter.c
447 struct page **pages, size_t maxsize, unsigned maxpages,
468 res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
475 struct page ***pages, size_t maxsize,
507 *pages = p;
777 struct page **pages, size_t maxsize, unsigned maxpages,
789 get_page(*pages = bvec->bv_page);
795 struct page ***pages, size_t maxsize,
806 *pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
807 if (!*pages)
810 get_page(**pages
446 get_pages_iovec(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) argument
474 get_pages_alloc_iovec(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start) argument
776 get_pages_bvec(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) argument
794 get_pages_alloc_bvec(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start) argument
929 iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) argument
940 iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start) argument
[all...]
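iov_iter_get_pages() (signature above) pins up to maxpages pages backing the iterator, returning the number of bytes covered and, via *start, the offset of the first byte within the first page. A hedged sketch of how a direct-I/O style path might use it; the iterator is assumed to be built by the caller, and "MAX_PIN" is illustrative:

```c
/* Sketch: pin the pages behind an iov_iter and release them afterwards. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uio.h>

#define MAX_PIN 16

static ssize_t demo_pin_iter(struct iov_iter *iter)
{
	struct page *pages[MAX_PIN];
	size_t start;		/* offset of the data within pages[0] */
	ssize_t bytes;
	int i, npages;

	bytes = iov_iter_get_pages(iter, pages, MAX_PIN * PAGE_SIZE,
				   MAX_PIN, &start);
	if (bytes <= 0)
		return bytes;

	npages = DIV_ROUND_UP(start + bytes, PAGE_SIZE);

	/* ... hand pages[0..npages) to the device ... */

	for (i = 0; i < npages; i++)
		put_page(pages[i]);
	return bytes;
}
```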
nommu.c
132 * PAGE_SIZE for 0-order pages.
151 unsigned int foll_flags, struct page **pages,
176 if (pages) {
177 pages[i] = virt_to_page(start);
178 if (pages[i])
179 page_cache_get(pages[i]);
193 * get a list of pages in an address range belonging to the specified process
201 int write, int force, struct page **pages,
211 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
312 * Allocate enough pages t
149 __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int foll_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) argument
199 get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas) argument
432 vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) argument
445 vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) argument
1152 struct page *pages; local
1906 __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) argument
[all...]
util.c
209 int nr_pages, int write, struct page **pages)
216 * get_user_pages_fast() - pin user pages in memory
218 * @nr_pages: number of pages from start to pin
219 * @write: whether pages will be written to
220 * @pages: array that receives pointers to the pages pinned.
223 * Returns number of pages pinned. This may be fewer than the number
224 * requested. If nr_pages is 0 or negative, returns 0. If no pages
235 * pages have to be faulted in, it may turn out to be slightly slower so
240 int nr_pages, int write, struct page **pages)
208 __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) argument
239 get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) argument
[all...]
dmapool.c
14 * new pages, then splits them up into blocks of the required size.
19 * allocated pages. Each page in the page_list is split into blocks of at
85 unsigned pages = 0; local
90 pages++;
98 pages * (pool->allocation / pool->size),
99 pool->size, pages);
466 * Better have a few empty pages hang around.
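dmapool.c implements the dma_pool allocator described in its header comment above: it grabs whole DMA-coherent pages and carves them into fixed-size blocks. A hedged sketch of the usual driver pattern (the size and alignment values are arbitrary, and "dev" is the driver's own struct device):

```c
/* Sketch: create a pool of 64-byte coherent blocks, allocate one, free it. */
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

static int demo_dma_pool(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	pool = dma_pool_create("demo", dev, 64, 8, 0);	/* size 64, align 8 */
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (vaddr) {
		/* ... program "dma" into the hardware, touch "vaddr" from the CPU ... */
		dma_pool_free(pool, vaddr, dma);
	}

	dma_pool_destroy(pool);
	return 0;
}
```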
truncate.c
2 * mm/truncate.c - code for taking down pages from address_spaces
152 * any time, and is not supposed to throw away dirty pages. But pages can
154 * discards clean, unused pages.
185 * Used to get rid of pages on hardware memory corruption.
192 * Only punch for normal data pages for now.
203 * It only drops clean, unused pages. The page must be locked.
220 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
225 * Truncate the page cache, removing the pages that are between
226 * specified offsets (and zeroing out partial pages
[all...]
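truncate_inode_pages_range(), documented above, removes page-cache pages between two byte offsets; truncate_inode_pages() is the whole-file wrapper. The classic caller is a filesystem's eviction path; a hedged sketch with a hypothetical ->evict_inode implementation:

```c
/* Sketch: drop all of an inode's cached pages while evicting it. */
#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_evict_inode(struct inode *inode)
{
	/* offset 0 .. EOF: remove every page from the page cache */
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
}
```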

Completed in 223 milliseconds
