internal.h revision 5344b7e648980cc2ca613ec03a56a8222ff48820
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

extern void prep_compound_page(struct page *page, unsigned long order);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}
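/*
 * Illustrative sketch, not part of the original header: how the
 * refcount helpers above are meant to be combined when handing a
 * fresh page from the buddy allocator to its first user.  The
 * function name is hypothetical; the real caller is prep_new_page()
 * in mm/page_alloc.c, whose exact logic varies by kernel version.
 */
#if 0
static void example_hand_out_page(struct page *page, unsigned long order,
				  gfp_t gfp_flags)
{
	/* Compound setup first, while the page is still unreferenced. */
	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);
	/* Buddy pages sit at _count == 0; give the new owner one ref. */
	set_page_refcounted(page);
}
#endif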
/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}

extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() called only from migrate_page_copy() to
 * migrate unevictable flag to new page.
 * Note that the old page has been isolated from the LRU lists at this
 * point so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Called only in fault path via page_evictable() for a new page
 * to determine if it's being mapped into a LOCKED vma.
 * If so, mark page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}

/*
 * must be called with vma's mmap_sem held for read, and page locked.
 */
extern void mlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}


#else /* CONFIG_UNEVICTABLE_LRU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* CONFIG_UNEVICTABLE_LRU */
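/*
 * Illustrative sketch, not part of the original header: the intended
 * caller pattern for clear_page_mlock() above.  Truncation paths clear
 * PageMlocked before dropping a page from the pagecache so the zone's
 * NR_MLOCK count stays accurate.  The function name here is
 * hypothetical, modelled on truncate_complete_page() in mm/truncate.c.
 */
#if 0
static void example_truncate_page(struct address_space *mapping,
				  struct page *page)
{
	/*
	 * Safe for any page: one still mapped by an mlocked vma simply
	 * falls back to lazy LRU handling the next time it is scanned.
	 */
	clear_page_mlock(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* drop the pagecache reference */
}
#endif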
/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define GUP_FLAGS_WRITE			0x1
#define GUP_FLAGS_FORCE			0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS	0x4

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas);

#endif
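/*
 * Illustrative sketch, not part of the original header: how a caller
 * inside mm/ might drive __get_user_pages() with the GUP_FLAGS_* bits
 * above.  The wrapper name is hypothetical; real callers such as
 * get_user_pages() in mm/memory.c assemble the flags the same way.
 */
#if 0
static int example_pin_one_page(struct task_struct *tsk, struct mm_struct *mm,
				unsigned long addr, int write, int force,
				struct page **page)
{
	int flags = 0;

	if (write)
		flags |= GUP_FLAGS_WRITE;
	if (force)
		flags |= GUP_FLAGS_FORCE;

	/*
	 * Caller must hold mm->mmap_sem; returns the number of pages
	 * pinned (here, at most 1) or a negative errno.
	 */
	return __get_user_pages(tsk, mm, addr, 1, flags, page, NULL);
}
#endif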