/mm/
balloon_compaction.c
    * balloon_page_enqueue - allocates a new page and inserts it into the balloon
    *                        page list.
    * @b_dev_info: balloon device descriptor to which we will insert a new page
    *
    * Driver must call it to properly allocate a new enlisted balloon page.
    * This function returns the page address for the recently enqueued page or
    * NULL in the case we fail to allocate a new page this turn.
    struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
    struct page *page;
    struct page *page, *tmp;
    __isolate_balloon_page(struct page *page)
    __putback_balloon_page(struct page *page)
    balloon_page_isolate(struct page *page)
    balloon_page_putback(struct page *page)
    balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode)
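The comment above describes the allocation half of the driver-facing API. As a rough illustration, a balloon driver's inflate path might loop on it as sketched below; balloon_fill() and the idea of reporting the PFN to the host afterwards are illustrative assumptions, not code from this file.

    #include <linux/balloon_compaction.h>

    /* Hypothetical inflate helper: enqueue up to "num" balloon pages. */
    static size_t balloon_fill(struct balloon_dev_info *b_dev_info, size_t num)
    {
            size_t done;

            for (done = 0; done < num; done++) {
                    struct page *page = balloon_page_enqueue(b_dev_info);

                    if (!page)
                            break;  /* allocation failed this turn; retry later */
                    /* a real driver would report page_to_pfn(page) to the host */
            }
            return done;
    }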
debug-pagealloc.c
    #include <linux/page-debug-flags.h>

    static inline void set_page_poison(struct page *page)
    {
            __set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
    }

    static inline void clear_page_poison(struct page *page)
    {
            __clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
    }

    static inline bool page_poison(struct page *page)
    {
            return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
    }

    poison_page(struct page *page)
    poison_pages(struct page *page, int n)
    unpoison_page(struct page *page)
    unpoison_pages(struct page *page, int n)
    kernel_map_pages(struct page *page, int numpages, int enable)
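The poison_page() body is elided above; a sketch consistent with the bit helpers shown, assuming the usual PAGE_POISON fill pattern from <linux/poison.h>, would be:

    #include <linux/highmem.h>
    #include <linux/poison.h>
    #include <linux/string.h>

    /* Sketch: flag the page as poisoned, then fill it with the poison byte. */
    static void poison_page(struct page *page)
    {
            void *addr = kmap_atomic(page);

            set_page_poison(page);
            memset(addr, PAGE_POISON, PAGE_SIZE);
            kunmap_atomic(addr);
    }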
internal.h
    static inline void set_page_count(struct page *page, int v)
    {
            atomic_set(&page->_count, v);
    }

    * Turn a non-refcounted page (->_count == 0) into refcounted with
    * a count of one.
    static inline void set_page_refcounted(struct page *page)
    {
            VM_BUG_ON_PAGE(PageTail(page), page);
            VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
            set_page_count(page, 1);
    }

    __get_page_tail_foll(struct page *page, bool get_page_head)
    get_page_foll(struct page *page)
    page_order(struct page *page)
    mlock_migrate_page(struct page *newpage, struct page *page)
    clear_page_mlock(struct page *page)
    mlock_vma_page(struct page *page)
    mminit_verify_page_links(struct page *page, enum zone_type zone, unsigned long nid, unsigned long pfn)
truncate.c
    * Regular page slots are stabilized by the page lock even ...

    * do_invalidatepage - invalidate part or all of a page
    * @page: the page which is affected
    * ...
    * do_invalidatepage() is called when all or part of the page has become ...
    void do_invalidatepage(struct page *page, unsigned int offset, ...)
            void (*invalidatepage)(struct page *, unsigned int, unsigned int);
            invalidatepage = page->mapping->a_ops->invalidatepage;

    cancel_dirty_page(struct page *page, unsigned int account_size)
    truncate_complete_page(struct address_space *mapping, struct page *page)
    invalidate_complete_page(struct address_space *mapping, struct page *page)
    truncate_inode_page(struct address_space *mapping, struct page *page)
    generic_error_remove_page(struct address_space *mapping, struct page *page)
    invalidate_inode_page(struct page *page)
    invalidate_complete_page2(struct address_space *mapping, struct page *page)
    do_launder_page(struct address_space *mapping, struct page *page)
    struct page *page = pvec.pages[i];
    struct page *page = find_lock_page(mapping, start - 1);
    struct page *page = find_lock_page(mapping, end);
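The do_invalidatepage() body is cut off above, but the fragments shown (the function-pointer local and the a_ops lookup) suggest a dispatcher along these lines, with block_invalidatepage() as the CONFIG_BLOCK fallback. A reconstruction, not verbatim source:

    void do_invalidatepage(struct page *page, unsigned int offset,
                           unsigned int length)
    {
            void (*invalidatepage)(struct page *, unsigned int, unsigned int);

            invalidatepage = page->mapping->a_ops->invalidatepage;
    #ifdef CONFIG_BLOCK
            if (!invalidatepage)
                    invalidatepage = block_invalidatepage;
    #endif
            if (invalidatepage)
                    (*invalidatepage)(page, offset, length);
    }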
page_isolation.c
    #include <linux/page-isolation.h>

    int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages)
            zone = page_zone(page);
            pfn = page_to_pfn(page);
            if (!has_unmovable_pages(zone, page, arg.pages_found, ...
            int migratetype = get_pageblock_migratetype(page);
            set_pageblock_migratetype(page, MIGRATE_ISOLATE);
            nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

    void unset_migratetype_isolate(struct page *page, ...)
    move_freepages(page_zone(page), page, end_page, ...
    alloc_migrate_target(struct page *page, unsigned long private, int **resultp)
    struct page *page;
filemap.c
    * finished 'unifying' the page and buffer cache and SMP-threaded the
    * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>

    page_cache_tree_delete(struct address_space *mapping, struct page *page, void *shadow)
            VM_BUG_ON(!PageLocked(page));
            __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
            /* Clear tree tags for the removed page */
            index = page->index;
            /* Delete page, swap shadow entry */

    * Delete a page from ...
    __delete_from_page_cache(struct page *page, void *shadow)
    delete_from_page_cache(struct page *page)
    page_cache_tree_insert(struct address_space *mapping, struct page *page, void **shadowp)
    __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask, void **shadowp)
            VM_BUG_ON_PAGE(PageSwapBacked(page), page);
    add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
    add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
    page_waitqueue(struct page *page)
    wait_on_page_bit(struct page *page, int bit_nr)
    wait_on_page_bit_killable(struct page *page, int bit_nr)
    wait_on_page_bit_killable_timeout(struct page *page, int bit_nr, unsigned long timeout)
    add_page_wait_queue(struct page *page, wait_queue_t *waiter)
    unlock_page(struct page *page)
    end_page_writeback(struct page *page)
    page_endio(struct page *page, int rw, int err)
    __lock_page(struct page *page)
    __lock_page_killable(struct page *page)
    __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags)
    do_async_mmap_readahead(struct vm_area_struct *vma, struct file_ra_state *ra, struct file *file, struct page *page, pgoff_t offset)
    wait_on_page_read(struct page *page)
    pagecache_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
    try_to_release_page(struct page *page, gfp_t gfp_mask)
    struct page *page = vmf->page;
    struct page *page = pvec.pages[i];
    struct page *page;
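The lookup, lock, and wait helpers indexed above combine into the standard page-cache access pattern. A minimal caller-side sketch (peek_page() is a made-up name; the helpers themselves are the ones listed):

    #include <linux/pagemap.h>

    static bool peek_page(struct address_space *mapping, pgoff_t index)
    {
            struct page *page = find_get_page(mapping, index);
            bool uptodate = false;

            if (!page)
                    return false;
            lock_page(page);                /* may sleep in __lock_page() */
            if (page->mapping == mapping)   /* recheck: truncation can race */
                    uptodate = PageUptodate(page);
            unlock_page(page);              /* wakes waiters on page_waitqueue(page) */
            page_cache_release(page);       /* drop the find_get_page() reference */
            return uptodate;
    }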
swap.c
    /* How many pages do we try to swap or page in/out together? */

    static void __page_cache_release(struct page *page)
            if (PageLRU(page)) {
                    struct zone *zone = page_zone(page);
                    lruvec = mem_cgroup_page_lruvec(page, zone);
                    VM_BUG_ON_PAGE(!PageLRU(page), page);
                    __ClearPageLRU(page);
                    del_page_from_lru_list(page, lruvec, ...

    __put_single_page(struct page *page)
    __put_compound_page(struct page *page)
    put_unrefcounted_compound_page(struct page *page_head, struct page *page)
            VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
    put_refcounted_compound_page(struct page *page_head, struct page *page)
            VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
            VM_BUG_ON_PAGE(PageTail(page), page);
    put_compound_page(struct page *page)
    put_page(struct page *page)
    __get_page_tail(struct page *page)
    pagevec_lru_move_fn(struct pagevec *pvec, void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), void *arg)
            struct page *page = pvec->pages[i];
    pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, void *arg)
    rotate_reclaimable_page(struct page *page)
    __activate_page(struct page *page, struct lruvec *lruvec, void *arg)
    activate_page(struct page *page)
    __lru_cache_activate_page(struct page *page)
    mark_page_accessed(struct page *page)
    __lru_cache_add(struct page *page)
    lru_cache_add_anon(struct page *page)
    lru_cache_add_file(struct page *page)
    lru_cache_add(struct page *page)
            VM_BUG_ON_PAGE(PageLRU(page), page);
    add_page_to_unevictable_list(struct page *page)
    lru_cache_add_active_or_unevictable(struct page *page, struct vm_area_struct *vma)
    lru_deactivate_fn(struct page *page, struct lruvec *lruvec, void *arg)
    deactivate_page(struct page *page)
    lru_add_page_tail(struct page *page, struct page *page_tail, struct lruvec *lruvec, struct list_head *list)
            VM_BUG_ON_PAGE(PageCompound(page_tail), page);
            VM_BUG_ON_PAGE(PageLRU(page_tail), page);
    __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, void *arg)
    struct page *page = pages[i];
swap_state.c
    * Rewritten to use page cache, (C) 1998 Stephen Tweedie

    int __add_to_swap_cache(struct page *page, swp_entry_t entry)
            VM_BUG_ON_PAGE(!PageLocked(page), page);
            VM_BUG_ON_PAGE(PageSwapCache(page), page);
            VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
            page_cache_get(page);

    add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
    __delete_from_swap_cache(struct page *page)
            VM_BUG_ON_PAGE(PageWriteback(page), page);
    add_to_swap(struct page *page, struct list_head *list)
    delete_from_swap_cache(struct page *page)
    free_swap_cache(struct page *page)
    free_page_and_swap_cache(struct page *page)
    struct page *page;
page_io.c
    * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie

    get_swap_bio(gfp_t gfp_flags, struct page *page, bio_end_io_t end_io)
            bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
            bio->bi_io_vec[0].bv_page = page;

    struct page *page = bio->bi_io_vec[0].bv_page;
            SetPageError(page);
            /*
             * We failed to write the page out to swap-space.
             * Re-dirty the page ...
             */

    swap_writepage(struct page *page, struct writeback_control *wbc)
    swap_page_sector(struct page *page)
    __swap_writepage(struct page *page, struct writeback_control *wbc, void (*end_write_func)(struct bio *, int))
    swap_readpage(struct page *page)
            VM_BUG_ON_PAGE(PageUptodate(page), page);
    swap_set_page_dirty(struct page *page)
kmemcheck.c
    void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
            struct page *shadow;
            page[i].shadow = page_address(&shadow[i]);
            /* ... this memory will trigger a page fault and let us analyze ... */
            kmemcheck_hide_pages(page, pages);

    void kmemcheck_free_shadow(struct page *page, int order)
            struct page *shadow;
            if (!kmemcheck_page_is_tracked(page))

    kmemcheck_pagealloc_alloc(struct page *page, unsigned int order, gfp_t gfpflags)
migrate.c
    * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()

    struct page *page;
    struct page *page2;
    list_for_each_entry_safe(page, page2, l, lru) {
            if (unlikely(PageHuge(page))) {
                    putback_active_hugepage(page);
            list_del(&page->lru);
            dec_zone_page_state(page, NR_ISOLATED_ANON +
                            page_is_file_cache(page));

    remove_linear_migration_ptes_from_nonlinear(struct page *page, struct address_space *mapping, void *arg)
    migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, struct buffer_head *head, enum migrate_mode mode, int extra_count)
    migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page)
    migrate_page_copy(struct page *newpage, struct page *page)
            VM_BUG_ON_PAGE(PageUnevictable(page), page);
    migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode)
    buffer_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode)
    writeout(struct address_space *mapping, struct page *page)
    fallback_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode)
    move_to_new_page(struct page *newpage, struct page *page, int remap_swapcache, enum migrate_mode mode)
    __unmap_and_move(struct page *page, struct page *newpage, int force, enum migrate_mode mode)
            VM_BUG_ON_PAGE(PageAnon(page), page);
    unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, unsigned long private, struct page *page, int force, enum migrate_mode mode)
    struct page *page;              /* member of struct page_to_node */
    alloc_misplaced_dst_page(struct page *page, unsigned long data, int **result)
    numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
    struct page *page = pmd_page(pmd);
    struct page *page = pmd_page(*pmd);
    migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node)
    migrate_misplaced_transhuge_page(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, pmd_t entry, unsigned long address, struct page *page, int node)
highmem.c
    * Rewrote high memory support to move the page cache into ...

    /* Determine color of virtual address where the page should be mapped. */
    static inline unsigned int get_pkmap_color(struct page *page)

    /* Get next index for mapping inside PKMAP region for page with given color. */
    /* Determine if page index inside PKMAP region (pkmap_nr) of given color ... */

    struct page *kmap_to_page(void *vaddr)
    * no-one has the page mapped ...
    map_new_virtual(struct page *page)
    kmap_high(struct page *page)
    kmap_high_get(struct page *page)
    kunmap_high(struct page *page)
    struct page *page;              /* member of struct page_address_map */
    page_slot(const struct page *page)
    page_address(const struct page *page)
    set_page_address(struct page *page, void *virtual)
    struct page *page;
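kmap_high() and kunmap_high() above are the slow paths behind the ordinary kmap()/kunmap() pair. Typical caller-side usage, with fill_page() as an illustrative name:

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void fill_page(struct page *page, const void *src, size_t len)
    {
            void *vaddr = kmap(page);       /* kmap_high() if the page is highmem */

            if (len > PAGE_SIZE)
                    len = PAGE_SIZE;
            memcpy(vaddr, src, len);
            kunmap(page);                   /* kunmap_high() releases the pkmap slot */
    }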
rmap.c
    * Provides methods for unmapping each kind of mapped page:
    * ... page->flags PG_locked (lock_page) ...

    * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
    * ... have been relevant to this page.
    * The page might have been remapped to a different anon_vma or the anon_vma ...
    * ... ensure that any anon_vma obtained from the page will still be valid for as ...
    * ... chain and verify that the page in question is indeed mapped in it ...
    * ... that the anon_vma pointer from page->mapping is valid if there is a ...

    struct anon_vma *page_get_anon_vma(struct page *page)
    page_lock_anon_vma_read(struct page *page)
    __vma_address(struct page *page, struct vm_area_struct *vma)
    vma_address(struct page *page, struct vm_area_struct *vma)
    page_address_in_vma(struct page *page, struct vm_area_struct *vma)
    __page_check_address(struct page *page, struct mm_struct *mm, unsigned long address, spinlock_t **ptlp, int sync)
    page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
    page_referenced_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg)
    page_referenced(struct page *page, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags)
    page_mkclean_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg)
    page_mkclean(struct page *page)
    page_move_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
    __page_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive)
    __page_check_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
    page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
    do_page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive)
    page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
    page_add_file_rmap(struct page *page)
    page_remove_file_rmap(struct page *page)
    page_remove_rmap(struct page *page)
    try_to_unmap_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg)
    try_to_unmap_nonlinear(struct page *page, struct address_space *mapping, void *arg)
    page_not_mapped(struct page *page)
    try_to_unmap(struct page *page, enum ttu_flags flags)
    try_to_munlock(struct page *page)
    rmap_walk_anon_lock(struct page *page, struct rmap_walk_control *rwc)
    rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
    rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
    rmap_walk(struct page *page, struct rmap_walk_control *rwc)
    __hugepage_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive)
    hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
    hugepage_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
    struct page *page;
dmapool.c
    * the given device.  It uses the dma_alloc_coherent page allocator to get
    * ...
    * allocated pages.  Each page in the page_list is split into blocks of at
    * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
    * list of free blocks within the page.  Used blocks aren't tracked, but we
    * keep a count of how many are currently allocated from each page.

    struct dma_page *page;
    list_for_each_entry(page, &pool->page_list, page_list) {
            blocks += page->in_use;

    static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
            *(int *)(page->vaddr + offset) = next;

    is_page_busy(struct dma_page *page)
    pool_free_page(struct dma_pool *pool, struct dma_page *page)
    struct dma_page *page;
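From the consumer side, the pool described above is driven through dma_pool_create()/dma_pool_alloc()/dma_pool_free(). A short sketch; the device pointer, pool name, and 64-byte block size are illustrative:

    #include <linux/dmapool.h>

    static int dmapool_demo(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *vaddr;

            pool = dma_pool_create("descs", dev, 64, 16, 0);
            if (!pool)
                    return -ENOMEM;

            vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma); /* one 64-byte block */
            if (vaddr)
                    dma_pool_free(pool, vaddr, dma);

            dma_pool_destroy(pool);
            return 0;
    }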
mlock.c
    * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will ...
    * may have mlocked a page that is being munlocked.  So lazy mlock must take ...

    void clear_page_mlock(struct page *page)
            if (!TestClearPageMlocked(page))
            mod_zone_page_state(page_zone(page), NR_MLOCK,
                            -hpage_nr_pages(page));
            if (!isolate_lru_page(page)) {
                    putback_lru_page(page);

    mlock_vma_page(struct page *page)
    __munlock_isolate_lru_page(struct page *page, bool getpage)
    __munlock_isolated_page(struct page *page)
    __munlock_isolation_failed(struct page *page)
    munlock_vma_page(struct page *page)
    __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, int *pgrescued)
            VM_BUG_ON_PAGE(PageLRU(page), page);
    struct page *page = pvec->pages[i];
    struct page *page = NULL;
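The mlock_vma_page()/munlock_vma_page() paths above are what userspace reaches through mlock(2) and munlock(2). A minimal userspace illustration:

    #include <stdlib.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            void *buf = malloc(len);

            if (!buf || mlock(buf, len))    /* pages become PageMlocked, unevictable */
                    return 1;
            /* ... buf stays resident while locked ... */
            munlock(buf, len);              /* munlock_vma_page() path in mm/mlock.c */
            free(buf);
            return 0;
    }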
zsmalloc.c
    * ... never attempts higher order page allocation which is very likely to ...
    * ... any object of size PAGE_SIZE/2 or larger would occupy an entire page.
    * ... and links them together using various 'struct page' fields.  These linked
    * pages act as a single higher-order page i.e. an object can span 0-order
    * page boundaries.  The code refers to these linked pages as a single entity ...
    * ... worst case, page is incompressible and is thus stored "as-is" i.e. in ...
    * ... struct page(s) to form a zspage.
    *
    * Usage of struct page fields:
    *   page->first_page: points to the first component (0-order) page

    is_first_page(struct page *page)
    is_last_page(struct page *page)
    get_zspage_mapping(struct page *page, unsigned int *class_idx, enum fullness_group *fullness)
    set_zspage_mapping(struct page *page, unsigned int class_idx, enum fullness_group fullness)
    get_fullness_group(struct page *page)
    insert_zspage(struct page *page, struct size_class *class, enum fullness_group fullness)
    remove_zspage(struct page *page, struct size_class *class, enum fullness_group fullness)
    fix_fullness_group(struct zs_pool *pool, struct page *page)
    get_first_page(struct page *page)
    get_next_page(struct page *page)
    obj_location_to_handle(struct page *page, unsigned long obj_idx)
    obj_handle_to_location(unsigned long handle, struct page **page, unsigned long *obj_idx)
    obj_idx_to_offset(struct page *page, unsigned long obj_idx, int class_size)
    reset_page(struct page *page)
    struct page *page = first_page;
    struct page *page;
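Consumers such as zram see the zspage machinery above only through opaque handles. A sketch of the handle-based API, assuming the zs_create_pool(gfp_t) signature of this kernel vintage (it gained a name argument in later kernels):

    #include <linux/string.h>
    #include <linux/zsmalloc.h>

    static int zsmalloc_demo(void)
    {
            struct zs_pool *pool = zs_create_pool(GFP_KERNEL);
            unsigned long handle;
            void *mem;

            if (!pool)
                    return -ENOMEM;
            handle = zs_malloc(pool, 200);  /* object may straddle two 0-order pages */
            if (handle) {
                    mem = zs_map_object(pool, handle, ZS_MM_WO);
                    memset(mem, 0x5a, 200);
                    zs_unmap_object(pool, handle);  /* only handles stay valid */
                    zs_free(pool, handle);
            }
            zs_destroy_pool(pool);
            return 0;
    }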
memory-failure.c
    * Handles page cache pages in various states.  The tricky part
    * here is that we can access any page asynchronously in respect to ...

    #include <linux/page-flags.h>
    #include <linux/kernel-page-flags.h>
    #include <linux/page-isolation.h>

    static int hwpoison_filter_dev(struct page *p)
    static int hwpoison_filter_flags(struct page *p)
    * can only guarantee that the page either belongs to the memcg tasks, or is
    * a freed page.
    static int hwpoison_filter_task(struct page *p)
    kill_proc(struct task_struct *t, unsigned long addr, int trapno, unsigned long pfn, struct page *page, int flags)
    kill_procs(struct list_head *to_kill, int forcekill, int trapno, int fail, struct page *page, unsigned long pfn, int flags)
    collect_procs_anon(struct page *page, struct list_head *to_kill, struct to_kill **tkc, int force_early)
    collect_procs_file(struct page *page, struct list_head *to_kill, struct to_kill **tkc, int force_early)
    collect_procs(struct page *page, struct list_head *tokill, int force_early)
    get_any_page(struct page *page, unsigned long pfn, int flags)
    soft_offline_huge_page(struct page *page, int flags)
    __soft_offline_page(struct page *page, int flags)
    soft_offline_page(struct page *page, int flags)
    struct page *page;
gup.c
    static struct page *no_page_table(struct vm_area_struct *vma, ...
    * page tables.  Return error instead of NULL to skip handle_mm_fault, ...
    static struct page *follow_page_pte(struct vm_area_struct *vma, ...
            struct page *page;
            /* KSM's break_ksm() relies upon recognizing a ksm page ... */
            page = vm_normal_page(vma, address, pte);
            if (unlikely(!page)) {
                    ...
                    page = pte_page(pte);
            get_page_foll(page);

    get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page)
    struct page *head, *page, *tail;
            VM_BUG_ON_PAGE(compound_head(page) != head, page);
    struct page *page;
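follow_page_pte() above is the per-PTE workhorse; most code goes through the get_user_pages*() wrappers instead. A caller-side sketch using the get_user_pages_fast() signature of this era (touch_user_page() is a made-up name):

    #include <linux/mm.h>

    static int touch_user_page(unsigned long uaddr)
    {
            struct page *page;

            if (get_user_pages_fast(uaddr, 1, 1 /* write */, &page) != 1)
                    return -EFAULT;
            /* ... kmap(page) and read or modify the data here ... */
            put_page(page);         /* drop the reference gup took for us */
            return 0;
    }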
slub.c
    *   3. slab_lock(page) (Only on some arches and for debugging)
    * ...
    * double word in the page struct.  Meaning
    *   A. page->freelist -> List of free objects in a page
    *   B. page->counters -> Counters of objects
    *   C. page->frozen   -> frozen state
    * ...
    * perform list operations on the page.  Other processors may put objects ...
    * ... one that can retrieve the objects from the page's freelist.
    * ...
    * minimal so we rely on the page allocator's per-cpu caches for ...
    * ...
    * Overloading of page flags ...

    check_valid_pointer(struct kmem_cache *s, struct page *page, const void *object)
    slab_lock(struct page *page)
    slab_unlock(struct page *page)
    set_page_slub_counters(struct page *page, unsigned long counters_new)
    __cmpxchg_double_slab(struct kmem_cache *s, struct page *page, void *freelist_old, unsigned long counters_old, void *freelist_new, unsigned long counters_new, const char *n)
    cmpxchg_double_slab(struct kmem_cache *s, struct page *page, void *freelist_old, unsigned long counters_old, void *freelist_new, unsigned long counters_new, const char *n)
    get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
    print_page_info(struct page *page)
    print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
    object_err(struct kmem_cache *s, struct page *page, u8 *object, char *reason)
    slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
    check_bytes_and_report(struct kmem_cache *s, struct page *page, u8 *object, char *what, u8 *start, unsigned int value, unsigned int bytes)
    check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
    slab_pad_check(struct kmem_cache *s, struct page *page)
    check_object(struct kmem_cache *s, struct page *page, void *object, u8 val)
    check_slab(struct kmem_cache *s, struct page *page)
    on_freelist(struct kmem_cache *s, struct page *page, void *search)
    trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
    add_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
    remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
    setup_object_debug(struct kmem_cache *s, struct page *page, void *object)
    alloc_debug_processing(struct kmem_cache *s, struct page *page, void *object, unsigned long addr)
    free_debug_processing(struct kmem_cache *s, struct page *page, void *object, unsigned long addr, unsigned long *flags)
    setup_object(struct kmem_cache *s, struct page *page, void *object)
    __free_slab(struct kmem_cache *s, struct page *page)
    free_slab(struct kmem_cache *s, struct page *page)
    discard_slab(struct kmem_cache *s, struct page *page)
    __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
    add_partial(struct kmem_cache_node *n, struct page *page, int tail)
    __remove_partial(struct kmem_cache_node *n, struct page *page)
    remove_partial(struct kmem_cache_node *n, struct page *page)
    acquire_slab(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page, int mode, int *objects)
    deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
    put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
    node_match(struct page *page, int node)
    count_free(struct page *page)
    pfmemalloc_match(struct page *page, gfp_t gfpflags)
    get_freelist(struct kmem_cache *s, struct page *page)
    __slab_free(struct kmem_cache *s, struct page *page, void *x, unsigned long addr)
    slab_free(struct kmem_cache *s, struct page *page, void *x, unsigned long addr)
    list_slab_objects(struct kmem_cache *s, struct page *page, const char *text)
    count_inuse(struct page *page)
    count_total(struct page *page)
    validate_slab(struct kmem_cache *s, struct page *page, unsigned long *map)
    validate_slab_slab(struct kmem_cache *s, struct page *page, unsigned long *map)
    process_slab(struct loc_track *t, struct kmem_cache *s, struct page *page, enum track_item alloc, unsigned long *map)
    struct page *page, *page2;
    struct page *page, *discard_page = NULL;
    struct page *page, *h;
    struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
    struct page *page;
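The comment above explains why freelist and counters share one double word: __cmpxchg_double_slab() swaps both at once, so a stale freelist pointer can never win against updated counters. A userspace C11 analogue of that versioned-pointer pop, not kernel code, just the same ABA-avoidance idea:

    #include <stdatomic.h>
    #include <stdio.h>

    struct node { struct node *next; };

    struct slot {                   /* stands in for page->freelist + page->counters */
            struct node *freelist;
            unsigned long counters; /* version tag: bumping it defeats ABA reuse */
    };

    static _Atomic struct slot head;

    static struct node *pop(void)
    {
            struct slot old = atomic_load(&head), new;

            do {
                    if (!old.freelist)
                            return NULL;
                    /* in real concurrent code this deref needs lifetime care */
                    new.freelist = old.freelist->next;
                    new.counters = old.counters + 1;
            } while (!atomic_compare_exchange_weak(&head, &old, new));
            return old.freelist;
    }

    int main(void)
    {
            static struct node n1, n2 = { .next = &n1 };
            struct slot init = { &n2, 0 };

            atomic_store(&head, init);
            printf("%p %p %p\n", (void *)pop(), (void *)pop(), (void *)pop());
            return 0;
    }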
readahead.c
    #define list_to_page(head) (list_entry((head)->prev, struct page, lru))

    * see if a page needs releasing upon read_cache_pages() failure
    static void read_cache_pages_invalidate_page(struct address_space *mapping, struct page *page)
            if (page_has_private(page)) {
                    if (!trylock_page(page))
                    page->mapping = mapping;
                    do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
                    page->mapping = NULL;
                    unlock_page(page);

    struct page *page = list_to_page(pages);
    page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, struct page *page, pgoff_t offset, unsigned long req_size)
    struct page *page;
page_alloc.c
    * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002

    #include <linux/page-isolation.h>
    #include <linux/page-debug-flags.h>

    static void __free_pages_ok(struct page *page, unsigned int order);

    void set_pageblock_migratetype(struct page *page, int migratetype)
            set_pageblock_flags_group(page, (unsigned long)migratetype, ...

    page_outside_zone_boundaries(struct zone *zone, struct page *page)
    page_is_consistent(struct zone *zone, struct page *page)
    bad_range(struct zone *zone, struct page *page)
    bad_page(struct page *page, const char *reason, unsigned long bad_flags)
    free_compound_page(struct page *page)
    prep_compound_page(struct page *page, unsigned long order)
    destroy_compound_page(struct page *page, unsigned long order)
    prep_zero_page(struct page *page, unsigned int order, gfp_t gfp_flags)
    set_page_guard_flag(struct page *page)
    clear_page_guard_flag(struct page *page)
    set_page_order(struct page *page, unsigned int order)
    rmv_page_order(struct page *page)
    page_is_buddy(struct page *page, struct page *buddy, unsigned int order)
    __free_one_page(struct page *page, unsigned long pfn, struct zone *zone, unsigned int order, int migratetype)
            VM_BUG_ON_PAGE(bad_range(zone, page), page);
    free_pages_check(struct page *page)
    free_one_page(struct zone *zone, struct page *page, unsigned long pfn, unsigned int order, int migratetype)
    free_pages_prepare(struct page *page, unsigned int order)
    __free_pages_ok(struct page *page, unsigned int order)
            free_one_page(page_zone(page), page, pfn, order, migratetype);
    __free_pages_bootmem(struct page *page, unsigned int order)
    init_cma_reserved_pageblock(struct page *page)
    expand(struct zone *zone, struct page *page, int low, int high, struct free_area *area, int migratetype)
            VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
    check_new_page(struct page *page)
    prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
            VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
    move_freepages_block(struct zone *zone, struct page *page, int migratetype)
    try_to_steal_freepages(struct zone *zone, struct page *page, int start_type, int fallback_type)
    free_hot_cold_page(struct page *page, bool cold)
    split_page(struct page *page, unsigned int order)
            VM_BUG_ON_PAGE(PageCompound(page), page);
    __isolate_free_page(struct page *page, unsigned int order)
    split_free_page(struct page *page)
    __free_pages(struct page *page, unsigned int order)
    __free_kmem_pages(struct page *page, unsigned int order)
    adjust_managed_page_count(struct page *page, long count)
    free_highmem_page(struct page *page)
    get_pfnblock_flags_mask(struct page *page, unsigned long pfn, unsigned long end_bitidx, unsigned long mask)
    set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, unsigned long end_bitidx, unsigned long mask)
    has_unmovable_pages(struct zone *zone, struct page *page, int count, bool skip_hwpoisoned_pages)
    is_pageblock_removable_nolock(struct page *page)
    is_free_buddy_page(struct page *page)
    struct page *page = __rmqueue(zone, order, migratetype);
    struct page *page = pfn_to_page(pfn);
    struct page *page = NULL;
    struct page *page, *next;
    struct page *page;
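Caller's-eye view of the buddy interface indexed above: alloc_pages() hands back the first struct page of an order-N block and __free_pages() returns it. A short sketch (buddy_demo() is an illustrative name):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    static int buddy_demo(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL, 2); /* 4 contiguous pages */
            void *addr;

            if (!page)
                    return -ENOMEM;
            addr = page_address(page);      /* linear address; GFP_KERNEL is lowmem */
            memset(addr, 0, 4 * PAGE_SIZE);
            __free_pages(page, 2);          /* order must match the allocation */
            return 0;
    }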
mmzone.c
    * management codes for pgdats, zones and page flags

    int memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone)
            if (page_to_pfn(page) != pfn)
                    return 0;
            if (page_zone(page) != zone)
                    return 0;

    int page_cpupid_xchg_last(struct page *page, int cpupid)
            old_flags = flags = page->flags;
            last_cpupid = page_cpupid_last(page);
            } while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
ksm.c
    * by their contents.  Because each such page is write-protected, searching on ...
    * ... take 10 attempts to find a page in the unstable tree, once it is found,
    * it is secured in the stable tree.  (When we scan a new page, we first ...

    * @node: rb node of this ksm page in the stable tree
    * @hlist: hlist head of rmap_items using this ksm page
    * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
    * @nid: NUMA node id of unstable tree in which linked (may not match page)
    * @oldchecksum: previous checksum of the page at that virtual address
    /* The number of page slots ... */

    page_trans_compound_anon(struct page *page)
    calc_checksum(struct page *page)
    write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte)
    replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte)
    page_trans_compound_anon_split(struct page *page)
    try_to_merge_one_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
    try_to_merge_with_ksm_page(struct rmap_item *rmap_item, struct page *page, struct page *kpage)
    try_to_merge_two_pages(struct rmap_item *rmap_item, struct page *page, struct rmap_item *tree_rmap_item, struct page *tree_page)
    stable_tree_search(struct page *page)
    unstable_tree_search_insert(struct rmap_item *rmap_item, struct page *page, struct page **tree_pagep)
    cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
    scan_get_next_rmap_item(struct page **page)
    ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address)
    rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
    struct page *page;
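Pages only reach the checksumming and tree-search code above if userspace opts them in with madvise(MADV_MERGEABLE). A userspace illustration:

    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED || madvise(buf, len, MADV_MERGEABLE))
                    return 1;
            memset(buf, 0x5a, len); /* identical pages are merge candidates for ksmd */
            pause();                /* keep the mapping alive so ksmd can scan it */
            return 0;
    }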
vmscan.c
    #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
    struct page *prev; \

    * Here we assume it costs one seek to replace a lru page and that it also ...
    * ... slab reclaim versus page reclaim.

    static inline int is_page_cache_freeable(struct page *page)
    * A freeable page cache page is referenced only by the caller
    * that isolated the page, the ...

    handle_write_error(struct address_space *mapping, struct page *page, int error)
    pageout(struct page *page, struct address_space *mapping, struct scan_control *sc)
    __remove_mapping(struct address_space *mapping, struct page *page, bool reclaimed)
    remove_mapping(struct address_space *mapping, struct page *page)
    putback_lru_page(struct page *page)
            VM_BUG_ON_PAGE(PageLRU(page), page);
    page_check_references(struct page *page, struct scan_control *sc)
    page_check_dirty_writeback(struct page *page, bool *dirty, bool *writeback)
    __isolate_lru_page(struct page *page, isolate_mode_t mode)
    isolate_lru_page(struct page *page)
    page_evictable(struct page *page)
    struct page *page = lru_to_page(page_list);
    struct page *page = pages[i];
    struct page *page, *next;
    struct page *page;
            VM_BUG_ON_PAGE(page_zone(page) != zone, page);
            VM_BUG_ON_PAGE(PageActive(page), page);
huge_memory.c
    * Defrag is invoked by khugepaged hugepage allocations and by page faults ...
    * it would have happened if the vma was large enough during page ...

    static struct page *huge_zero_page __read_mostly;

    static inline bool is_huge_zero_page(struct page *page)
    {
            return ACCESS_ONCE(huge_zero_page) == page;
    }

    static struct page *get_huge_zero_page(void)
            struct page *zero_page;
    /* we can free zero page only if last reference remains */

    mk_huge_pmd(struct page *page, pgprot_t prot)
    __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *page)
    get_user_huge_page(struct page *page)
    put_user_huge_page(struct page *page)
    do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr)
    page_check_address_pmd(struct page *page, struct mm_struct *mm, unsigned long address, enum page_check_address_pmd_flag flag, spinlock_t **ptl)
    __split_huge_page_splitting(struct page *page, struct vm_area_struct *vma, unsigned long address)
    __split_huge_page_refcount(struct page *page, struct list_head *list)
    __split_huge_page_map(struct page *page, struct vm_area_struct *vma, unsigned long address)
    __split_huge_page(struct page *page, struct anon_vma *anon_vma, struct list_head *list)
    split_huge_page_to_list(struct page *page, struct list_head *list)
    release_pte_page(struct page *page)
    __collapse_huge_page_copy(pte_t *pte, struct page *page, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl)
    struct page *page = NULL, *new_page;
    struct page *page = pmd_page(*pmd);
    struct page *page;
            VM_BUG_ON_PAGE(PageLRU(page), page);
            VM_BUG_ON_PAGE(PageCompound(page), page);
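The fault and collapse paths above are steered from userspace by madvise(MADV_HUGEPAGE) when THP runs in madvise mode. A userspace illustration; the 2 MB alignment matches the usual x86-64 huge page size:

    #define _GNU_SOURCE
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t huge = 2 * 1024 * 1024;
            void *buf;

            if (posix_memalign(&buf, huge, 4 * huge))
                    return 1;
            if (madvise(buf, 4 * huge, MADV_HUGEPAGE)) /* khugepaged may collapse these */
                    return 1;
            memset(buf, 0, 4 * huge);   /* first touch can fault in 2 MB pages directly */
            free(buf);
            return 0;
    }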