#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
        max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
                                    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
                                     size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x40	/* second try */
#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 */
struct vm_fault {
        unsigned int flags;		/* FAULT_FLAG_xxx flags */
        pgoff_t pgoff;			/* Logical page offset based on vma */
        void __user *virtual_address;	/* Faulting virtual address */

        struct page *page;		/* ->fault handlers should return a
                                         * page here, unless VM_FAULT_NOPAGE
                                         * is set (which is also implied by
                                         * VM_FAULT_ERROR).
                                         */
        /* for ->map_pages() only */
        pgoff_t max_pgoff;		/* map pages for offset from pgoff till
                                         * max_pgoff inclusive */
        pte_t *pte;			/* pte entry associated with ->pgoff */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
        void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);

        /* notification that a previously read-only page is about to become
         * writable, if an error is returned it will cause a SIGBUS */
        int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

        /* called by access_process_vm when get_user_pages() fails, typically
         * for use by special VMAs that can switch between memory and hardware
         */
        int (*access)(struct vm_area_struct *vma, unsigned long addr,
                      void *buf, int len, int write);

        /* Called by the /proc/PID/maps code to ask the vma whether it
         * has a special name.  Returning non-NULL will also cause this
         * vma to be dumped unconditionally. */
        const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
        /*
         * set_policy() op must add a reference to any non-NULL @new mempolicy
         * to hold the policy upon return.  Caller should pass NULL @new to
         * remove a policy and fall back to surrounding context--i.e. do not
         * install a MPOL_DEFAULT policy, nor the task or system default
         * mempolicy.
         */
        int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

        /*
         * get_policy() op must add reference [mpol_get()] to any policy at
         * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
         * in mm/mempolicy.c will do this automatically.
         * get_policy() must NOT add a ref if the policy at (vma,addr) is not
         * marked as MPOL_SHARED.  vma policies are protected by the mmap_sem.
         * If no [shared/vma] mempolicy exists at the addr, get_policy() op
         * must return NULL--i.e., do not "fallback" to task or system default
         * policy.
         */
        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                        unsigned long addr);
        int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
                       const nodemask_t *to, unsigned long flags);
#endif
        /* called by sys_remap_file_pages() to populate non-linear mapping */
        int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
                           unsigned long size, pgoff_t pgoff);
};
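
/*
 * Example (illustrative sketch only): a driver's ->fault handler typically
 * looks up the backing page for vmf->pgoff, takes a reference on it and
 * returns it through vmf->page.  The my_object structure and its page array
 * below are hypothetical placeholders, not part of this API.
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct my_object *obj = vma->vm_private_data;
 *
 *		if (vmf->pgoff >= obj->nr_pages)
 *			return VM_FAULT_SIGBUS;
 *
 *		vmf->page = obj->pages[vmf->pgoff];
 *		get_page(vmf->page);
 *		return 0;
 *	}
 */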

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* Valid only while the page is on the free path or in a free list */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
        page->index = migratetype;
}

/* Valid only while the page is on the free path or in a free list */
static inline int get_freepage_migratetype(struct page *page)
{
        return page->index;
}

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
        VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
        return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
        return atomic_inc_not_zero(&page->_count);
}

/*
 * Try to drop a ref unless the page has a refcount of one, return false if
 * that is the case.
 * This is to make sure that the refcount won't become zero after this drop.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int put_page_unless_one(struct page *page)
{
        return atomic_add_unless(&page->_count, -1, 1);
}

extern int page_is_ram(unsigned long pfn);
extern int region_is_ram(resource_size_t phys_addr, unsigned long size);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);
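
/*
 * Example (illustrative sketch): the speculative-reference helpers above are
 * typically used by lockless lookups that may race with the page being freed.
 * The lookup itself is elided; only the refcount handling is shown.
 *
 *	page = ...lockless lookup that may race with freeing...;
 *	if (page && !get_page_unless_zero(page))
 *		page = NULL;		// already on its way to being freed
 *	if (page) {
 *		...use the page...
 *		put_page(page);		// drop the speculative reference
 *	}
 */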

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
        unsigned long addr = (unsigned long)x;

        return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
        return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
        return 0;
}
#endif

extern void kvfree(const void *addr);

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        VM_BUG_ON_PAGE(PageSlab(page), page);
        bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        VM_BUG_ON_PAGE(PageSlab(page), page);
        bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
        unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        local_irq_save(flags);
        compound_lock(page);
#endif
        return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
                                              unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        compound_unlock(page);
        local_irq_restore(flags);
#endif
}

static inline struct page *compound_head_by_tail(struct page *tail)
{
        struct page *head = tail->first_page;

        /*
         * page->first_page may be a dangling pointer to an old
         * compound page, so recheck that it is still a tail
         * page before returning.
         */
        smp_rmb();
        if (likely(PageTail(tail)))
                return head;
        return tail;
}

static inline struct page *compound_head(struct page *page)
{
        if (unlikely(PageTail(page)))
                return compound_head_by_tail(page);
        return page;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
        atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
        return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
        return atomic_read(&compound_head(page)->_count);
}

#ifdef CONFIG_HUGETLB_PAGE
extern int PageHeadHuge(struct page *page_head);
#else /* CONFIG_HUGETLB_PAGE */
static inline int PageHeadHuge(struct page *page_head)
{
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline bool __compound_tail_refcounted(struct page *page)
{
        return !PageSlab(page) && !PageHeadHuge(page);
}

/*
 * This takes a head page as parameter and tells if the
 * tail page reference counting can be skipped.
 *
 * For this to be safe, PageSlab and PageHeadHuge must remain true on
 * any given page where they return true here, until all tail pins
 * have been released.
 */
static inline bool compound_tail_refcounted(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        return __compound_tail_refcounted(page);
}

static inline void get_huge_page_tail(struct page *page)
{
        /*
         * __split_huge_page_refcount() cannot run from under us.
         */
        VM_BUG_ON_PAGE(!PageTail(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
        VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
        if (compound_tail_refcounted(page->first_page))
                atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
        if (unlikely(PageTail(page)))
                if (likely(__get_page_tail(page)))
                        return;
        /*
         * Getting a normal page or the head of a compound page
         * requires to already have an elevated page->_count.
         */
        VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
        atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
        struct page *page = virt_to_page(x);
        return compound_head(page);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
        atomic_set(&page->_count, 1);
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
        return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
        atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
        VM_BUG_ON_PAGE(!PageBuddy(page), page);
        atomic_set(&page->_mapcount, -1);
}

#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
        return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
        atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
        VM_BUG_ON_PAGE(!PageBalloon(page), page);
        atomic_set(&page->_mapcount, -1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);
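
/*
 * Example (illustrative sketch): split_page() is typically used by callers
 * that allocate a high-order, non-compound page but want to hand out or free
 * the constituent order-0 pages individually.
 *
 *	page = alloc_pages(GFP_KERNEL, order);
 *	if (!page)
 *		return -ENOMEM;
 *	split_page(page, order);
 *	...give out the 1 << order individual pages...
 *	for (i = 0; i < (1 << order); i++)
 *		__free_page(page + i);
 */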

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
                                          compound_page_dtor *dtor)
{
        page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
        return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
        if (!PageHead(page))
                return 0;
        return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
        page[1].lru.prev = (void *)order;
}

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
        if (likely(vma->vm_flags & VM_WRITE))
                pte = pte_mkwrite(pte);
        return pte;
}

void do_set_pte(struct vm_area_struct *vma, unsigned long address,
                struct page *page, pte_t *pte, bool write, bool anon);
#endif
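
/*
 * Example (illustrative sketch): maybe_mkwrite() is used when constructing a
 * pte for a page that is being mapped in response to a fault, roughly along
 * the lines of what do_set_pte() does in mm/memory.c:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *
 *	if (write)
 *		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	set_pte_at(vma->vm_mm, address, pte, entry);
 */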

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also holds a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
        return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
        return zone->node;
#else
        return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
        return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
        return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
        return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
        return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
        return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
        return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
        return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
        return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
        return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
        return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
        page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
        return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
        int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;

        page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
        page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
        return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
        return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
        return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
        return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
        return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
        return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
        return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
        return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
        return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
        page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
        page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
        return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
        page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
        page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
        page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
        page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
                                  unsigned long node, unsigned long pfn)
{
        set_page_zone(page, zone);
        set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
        set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
        return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
        return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
        page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space *page_mapping(struct page *page);

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
        return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return __page_file_mapping(page);

        return page->mapping;
}

static inline int PageAnon(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return page_private(page);
        return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return __page_file_index(page);

        return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
        return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, did not return a page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
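
/*
 * Example (illustrative sketch): an architecture's page fault handler
 * typically interprets the VM_FAULT_xxx bitmask along these lines (details
 * vary per arch; this is not a drop-in implementation):
 *
 *	ret = handle_mm_fault(mm, vma, address, flags);
 *	if (ret & VM_FAULT_ERROR) {
 *		if (ret & VM_FAULT_OOM)
 *			...trigger the OOM paths...
 *		else if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 *			...deliver SIGBUS for the poisoned page...
 *		else if (ret & VM_FAULT_SIGBUS)
 *			...deliver SIGBUS...
 *	} else if (ret & VM_FAULT_MAJOR) {
 *		current->maj_flt++;
 *	} else {
 *		current->min_flt++;
 *	}
 */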

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

void shmem_set_file(struct vm_area_struct *vma, struct file *file);
int shmem_zero_setup(struct vm_area_struct *);
#ifdef CONFIG_SHMEM
bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
        return false;
}
#endif

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
        struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
        struct address_space *check_mapping;	/* Check page->mapping if set */
        pgoff_t first_index;			/* Lowest page->index to unmap */
        pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
                unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  It may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *		   *Caution*: The caller must hold mmap_sem if @hugetlb_entry
 *		   is used.
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
        int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
                         unsigned long next, struct mm_walk *walk);
        int (*pud_entry)(pud_t *pud, unsigned long addr,
                         unsigned long next, struct mm_walk *walk);
        int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
                         unsigned long next, struct mm_walk *walk);
        int (*pte_entry)(pte_t *pte, unsigned long addr,
                         unsigned long next, struct mm_walk *walk);
        int (*pte_hole)(unsigned long addr, unsigned long next,
                        struct mm_walk *walk);
        int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
                             unsigned long addr, unsigned long next,
                             struct mm_walk *walk);
        struct mm_struct *mm;
        void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
                    struct mm_walk *walk);
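
/*
 * Example (illustrative sketch): counting present ptes in a range with
 * walk_page_range().  Callers are expected to hold mmap_sem for the walk;
 * the helper name count_pte() is a placeholder, not an existing symbol.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(start, end, &walk);
 *	up_read(&mm->mmap_sem);
 */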
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
                unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
                        void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen)
{
        unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                            unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        /* should never happen if there's no MMU */
        BUG();
        return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
                struct mm_struct *mm, unsigned long address,
                unsigned int fault_flags)
{
        /* should never happen if there's no MMU */
        BUG();
        return -EFAULT;
}
#endif

extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
                void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                      unsigned long start, unsigned long nr_pages,
                      unsigned int foll_flags, struct page **pages,
                      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                    unsigned long start, unsigned long nr_pages,
                    int write, int force, struct page **pages,
                    struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages);
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
                        struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
                              unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
                                struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
        return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
                                         unsigned long addr)
{
        return (vma->vm_flags & VM_GROWSDOWN) &&
                (vma->vm_start == addr) &&
                !vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
        return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
                                       unsigned long addr)
{
        return (vma->vm_flags & VM_GROWSUP) &&
                (vma->vm_end == addr) &&
                !vma_growsup(vma->vm_next, addr);
}

extern struct task_struct *task_of_stack(struct task_struct *task,
                                struct vm_area_struct *vma, bool in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
                              unsigned long end, pgprot_t newprot,
                              int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
                          struct vm_area_struct **pprev, unsigned long start,
                          unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                          struct page **pages);
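
/*
 * Example (illustrative sketch): pinning user pages for kernel access with
 * get_user_pages_fast() and releasing them afterwards.  Error handling is
 * abbreviated; pages that were written to should be dirtied before release.
 *
 *	npinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);  // write=1
 *	if (npinned < nr_pages)
 *		...handle the short pin (npinned may also be negative)...
 *
 *	...access the pinned pages...
 *
 *	for (i = 0; i < npinned; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */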

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
        long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
        /*
         * The counter is updated asynchronously and may temporarily go
         * negative, which is never a value callers expect, so clamp it
         * to zero.
         */
        if (val < 0)
                val = 0;
#endif
        return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
        atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
        atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
        atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
        return get_mm_counter(mm, MM_FILEPAGES) +
                get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
        return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
        return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
        unsigned long _rss = get_mm_rss(mm);

        if ((mm)->hiwater_rss < _rss)
                (mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
        if (mm->hiwater_vm < mm->total_vm)
                mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
        mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
                                         struct mm_struct *mm)
{
        unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

        if (*maxrss < hiwater_rss)
                *maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif
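
/*
 * Example (illustrative sketch): the rss counters are updated as pages are
 * (un)mapped and can be read back for reporting, e.g.:
 *
 *	inc_mm_counter(mm, MM_ANONPAGES);	// after mapping a new anon page
 *	...
 *	dec_mm_counter(mm, MM_ANONPAGES);	// when the page is unmapped
 *	...
 *	pr_debug("rss: %lu pages\n", get_mm_rss(mm));
 */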

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                               spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
                                    spinlock_t **ptl)
{
        pte_t *ptep;
        __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
        return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
                                                unsigned long address)
{
        return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
                                                unsigned long address)
{
        return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
        return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address)) ?
                NULL : pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
        return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
                NULL : pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
        return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
        return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
        return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
        return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
        /*
         * prep_new_page() initializes page->private (and therefore page->ptl)
         * with 0.  Make sure nobody has taken it into use in between.
         *
         * It can happen if an arch tries to use slab for page table allocation:
         * slab code uses page->slab_cache and page->first_page (for tail
         * pages), which share storage with page->ptl.
         */
        VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
        if (!ptlock_alloc(page))
                return false;
        spin_lock_init(ptlock_ptr(page));
        return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
        page->mapping = NULL;
        ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
        return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
        ptlock_cache_init();
        pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
        inc_zone_page_state(page, NR_PAGETABLE);
        return ptlock_init(page);
}

static inline void pgtable_page_dtor(struct page *page)
{
        pte_lock_deinit(page);
        dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,		\
						    pmd, address)) ?	\
		NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,		\
						    pmd, address)) ?	\
		NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
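
/*
 * Example (illustrative sketch): inspecting a pte with the page table lock
 * held, assuming the caller has already walked to a populated pmd (e.g. via
 * pmd_alloc() or an explicit pgd/pud/pmd walk):
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	if (pte_present(*pte))
 *		...examine or modify the entry...
 *	pte_unmap_unlock(pte, ptl);
 */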
\ 1546 NULL: pte_offset_kernel(pmd, address)) 1547 1548#if USE_SPLIT_PMD_PTLOCKS 1549 1550static struct page *pmd_to_page(pmd_t *pmd) 1551{ 1552 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); 1553 return virt_to_page((void *)((unsigned long) pmd & mask)); 1554} 1555 1556static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 1557{ 1558 return ptlock_ptr(pmd_to_page(pmd)); 1559} 1560 1561static inline bool pgtable_pmd_page_ctor(struct page *page) 1562{ 1563#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1564 page->pmd_huge_pte = NULL; 1565#endif 1566 return ptlock_init(page); 1567} 1568 1569static inline void pgtable_pmd_page_dtor(struct page *page) 1570{ 1571#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1572 VM_BUG_ON_PAGE(page->pmd_huge_pte, page); 1573#endif 1574 ptlock_free(page); 1575} 1576 1577#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte) 1578 1579#else 1580 1581static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 1582{ 1583 return &mm->page_table_lock; 1584} 1585 1586static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; } 1587static inline void pgtable_pmd_page_dtor(struct page *page) {} 1588 1589#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) 1590 1591#endif 1592 1593static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) 1594{ 1595 spinlock_t *ptl = pmd_lockptr(mm, pmd); 1596 spin_lock(ptl); 1597 return ptl; 1598} 1599 1600extern void free_area_init(unsigned long * zones_size); 1601extern void free_area_init_node(int nid, unsigned long * zones_size, 1602 unsigned long zone_start_pfn, unsigned long *zholes_size); 1603extern void free_initmem(void); 1604 1605/* 1606 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) 1607 * into the buddy system. The freed pages will be poisoned with pattern 1608 * "poison" if it's within range [0, UCHAR_MAX]. 1609 * Return pages freed into the buddy system. 1610 */ 1611extern unsigned long free_reserved_area(void *start, void *end, 1612 int poison, char *s); 1613 1614#ifdef CONFIG_HIGHMEM 1615/* 1616 * Free a highmem page into the buddy system, adjusting totalhigh_pages 1617 * and totalram_pages. 1618 */ 1619extern void free_highmem_page(struct page *page); 1620#endif 1621 1622extern void adjust_managed_page_count(struct page *page, long count); 1623extern void mem_init_print_info(const char *str); 1624 1625/* Free the reserved page into the buddy system, so it gets managed. */ 1626static inline void __free_reserved_page(struct page *page) 1627{ 1628 ClearPageReserved(page); 1629 init_page_count(page); 1630 __free_page(page); 1631} 1632 1633static inline void free_reserved_page(struct page *page) 1634{ 1635 __free_reserved_page(page); 1636 adjust_managed_page_count(page, 1); 1637} 1638 1639static inline void mark_page_reserved(struct page *page) 1640{ 1641 SetPageReserved(page); 1642 adjust_managed_page_count(page, -1); 1643} 1644 1645/* 1646 * Default method to free all the __init memory into the buddy system. 1647 * The freed pages will be poisoned with pattern "poison" if it's within 1648 * range [0, UCHAR_MAX]. 1649 * Return pages freed into the buddy system. 
1650 */ 1651static inline unsigned long free_initmem_default(int poison) 1652{ 1653 extern char __init_begin[], __init_end[]; 1654 1655 return free_reserved_area(&__init_begin, &__init_end, 1656 poison, "unused kernel"); 1657} 1658 1659static inline unsigned long get_num_physpages(void) 1660{ 1661 int nid; 1662 unsigned long phys_pages = 0; 1663 1664 for_each_online_node(nid) 1665 phys_pages += node_present_pages(nid); 1666 1667 return phys_pages; 1668} 1669 1670#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 1671/* 1672 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its 1673 * zones, allocate the backing mem_map and account for memory holes in a more 1674 * architecture independent manner. This is a substitute for creating the 1675 * zone_sizes[] and zholes_size[] arrays and passing them to 1676 * free_area_init_node() 1677 * 1678 * An architecture is expected to register range of page frames backed by 1679 * physical memory with memblock_add[_node]() before calling 1680 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic 1681 * usage, an architecture is expected to do something like 1682 * 1683 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, 1684 * max_highmem_pfn}; 1685 * for_each_valid_physical_page_range() 1686 * memblock_add_node(base, size, nid) 1687 * free_area_init_nodes(max_zone_pfns); 1688 * 1689 * free_bootmem_with_active_regions() calls free_bootmem_node() for each 1690 * registered physical page range. Similarly 1691 * sparse_memory_present_with_active_regions() calls memory_present() for 1692 * each range when SPARSEMEM is enabled. 1693 * 1694 * See mm/page_alloc.c for more information on each function exposed by 1695 * CONFIG_HAVE_MEMBLOCK_NODE_MAP. 1696 */ 1697extern void free_area_init_nodes(unsigned long *max_zone_pfn); 1698unsigned long node_map_pfn_alignment(void); 1699unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, 1700 unsigned long end_pfn); 1701extern unsigned long absent_pages_in_range(unsigned long start_pfn, 1702 unsigned long end_pfn); 1703extern void get_pfn_range_for_nid(unsigned int nid, 1704 unsigned long *start_pfn, unsigned long *end_pfn); 1705extern unsigned long find_min_pfn_with_active_regions(void); 1706extern void free_bootmem_with_active_regions(int nid, 1707 unsigned long max_low_pfn); 1708extern void sparse_memory_present_with_active_regions(int nid); 1709 1710#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 1711 1712#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ 1713 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) 1714static inline int __early_pfn_to_nid(unsigned long pfn) 1715{ 1716 return 0; 1717} 1718#else 1719/* please see mm/page_alloc.c */ 1720extern int __meminit early_pfn_to_nid(unsigned long pfn); 1721/* there is a per-arch backend function. 
*/ 1722extern int __meminit __early_pfn_to_nid(unsigned long pfn); 1723#endif 1724 1725extern void set_dma_reserve(unsigned long new_dma_reserve); 1726extern void memmap_init_zone(unsigned long, int, unsigned long, 1727 unsigned long, enum memmap_context); 1728extern void setup_per_zone_wmarks(void); 1729extern int __meminit init_per_zone_wmark_min(void); 1730extern void mem_init(void); 1731extern void __init mmap_init(void); 1732extern void show_mem(unsigned int flags); 1733extern void si_meminfo(struct sysinfo * val); 1734extern void si_meminfo_node(struct sysinfo *val, int nid); 1735 1736extern __printf(3, 4) 1737void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); 1738 1739extern void setup_per_cpu_pageset(void); 1740 1741extern void zone_pcp_update(struct zone *zone); 1742extern void zone_pcp_reset(struct zone *zone); 1743 1744/* page_alloc.c */ 1745extern int min_free_kbytes; 1746 1747/* nommu.c */ 1748extern atomic_long_t mmap_pages_allocated; 1749extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); 1750 1751/* interval_tree.c */ 1752void vma_interval_tree_insert(struct vm_area_struct *node, 1753 struct rb_root *root); 1754void vma_interval_tree_insert_after(struct vm_area_struct *node, 1755 struct vm_area_struct *prev, 1756 struct rb_root *root); 1757void vma_interval_tree_remove(struct vm_area_struct *node, 1758 struct rb_root *root); 1759struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root, 1760 unsigned long start, unsigned long last); 1761struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, 1762 unsigned long start, unsigned long last); 1763 1764#define vma_interval_tree_foreach(vma, root, start, last) \ 1765 for (vma = vma_interval_tree_iter_first(root, start, last); \ 1766 vma; vma = vma_interval_tree_iter_next(vma, start, last)) 1767 1768static inline void vma_nonlinear_insert(struct vm_area_struct *vma, 1769 struct list_head *list) 1770{ 1771 list_add_tail(&vma->shared.nonlinear, list); 1772} 1773 1774void anon_vma_interval_tree_insert(struct anon_vma_chain *node, 1775 struct rb_root *root); 1776void anon_vma_interval_tree_remove(struct anon_vma_chain *node, 1777 struct rb_root *root); 1778struct anon_vma_chain *anon_vma_interval_tree_iter_first( 1779 struct rb_root *root, unsigned long start, unsigned long last); 1780struct anon_vma_chain *anon_vma_interval_tree_iter_next( 1781 struct anon_vma_chain *node, unsigned long start, unsigned long last); 1782#ifdef CONFIG_DEBUG_VM_RB 1783void anon_vma_interval_tree_verify(struct anon_vma_chain *node); 1784#endif 1785 1786#define anon_vma_interval_tree_foreach(avc, root, start, last) \ 1787 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ 1788 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) 1789 1790/* mmap.c */ 1791extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); 1792extern int vma_adjust(struct vm_area_struct *vma, unsigned long start, 1793 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); 1794extern struct vm_area_struct *vma_merge(struct mm_struct *, 1795 struct vm_area_struct *prev, unsigned long addr, unsigned long end, 1796 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, 1797 struct mempolicy *, const char __user *); 1798extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); 1799extern int split_vma(struct mm_struct *, 1800 struct vm_area_struct *, unsigned long addr, int new_below); 1801extern int 
insert_vm_struct(struct mm_struct *, struct vm_area_struct *); 1802extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, 1803 struct rb_node **, struct rb_node *); 1804extern void unlink_file_vma(struct vm_area_struct *); 1805extern struct vm_area_struct *copy_vma(struct vm_area_struct **, 1806 unsigned long addr, unsigned long len, pgoff_t pgoff, 1807 bool *need_rmap_locks); 1808extern void exit_mmap(struct mm_struct *); 1809 1810static inline int check_data_rlimit(unsigned long rlim, 1811 unsigned long new, 1812 unsigned long start, 1813 unsigned long end_data, 1814 unsigned long start_data) 1815{ 1816 if (rlim < RLIM_INFINITY) { 1817 if (((new - start) + (end_data - start_data)) > rlim) 1818 return -ENOSPC; 1819 } 1820 1821 return 0; 1822} 1823 1824extern int mm_take_all_locks(struct mm_struct *mm); 1825extern void mm_drop_all_locks(struct mm_struct *mm); 1826 1827extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); 1828extern struct file *get_mm_exe_file(struct mm_struct *mm); 1829 1830extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); 1831extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, 1832 unsigned long addr, unsigned long len, 1833 unsigned long flags, 1834 const struct vm_special_mapping *spec); 1835/* This is an obsolete alternative to _install_special_mapping. */ 1836extern int install_special_mapping(struct mm_struct *mm, 1837 unsigned long addr, unsigned long len, 1838 unsigned long flags, struct page **pages); 1839 1840extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 1841 1842extern unsigned long mmap_region(struct file *file, unsigned long addr, 1843 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); 1844extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 1845 unsigned long len, unsigned long prot, unsigned long flags, 1846 unsigned long pgoff, unsigned long *populate); 1847extern int do_munmap(struct mm_struct *, unsigned long, size_t); 1848 1849#ifdef CONFIG_MMU 1850extern int __mm_populate(unsigned long addr, unsigned long len, 1851 int ignore_errors); 1852static inline void mm_populate(unsigned long addr, unsigned long len) 1853{ 1854 /* Ignore errors */ 1855 (void) __mm_populate(addr, len, 1); 1856} 1857#else 1858static inline void mm_populate(unsigned long addr, unsigned long len) {} 1859#endif 1860 1861/* These take the mm semaphore themselves */ 1862extern unsigned long vm_brk(unsigned long, unsigned long); 1863extern int vm_munmap(unsigned long, size_t); 1864extern unsigned long vm_mmap(struct file *, unsigned long, 1865 unsigned long, unsigned long, 1866 unsigned long, unsigned long); 1867 1868struct vm_unmapped_area_info { 1869#define VM_UNMAPPED_AREA_TOPDOWN 1 1870 unsigned long flags; 1871 unsigned long length; 1872 unsigned long low_limit; 1873 unsigned long high_limit; 1874 unsigned long align_mask; 1875 unsigned long align_offset; 1876}; 1877 1878extern unsigned long unmapped_area(struct vm_unmapped_area_info *info); 1879extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); 1880 1881/* 1882 * Search for an unmapped address range. 1883 * 1884 * We are looking for a range that: 1885 * - does not intersect with any VMA; 1886 * - is contained within the [low_limit, high_limit) interval; 1887 * - is at least the desired size. 
1888 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) 1889 */ 1890static inline unsigned long 1891vm_unmapped_area(struct vm_unmapped_area_info *info) 1892{ 1893 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN)) 1894 return unmapped_area(info); 1895 else 1896 return unmapped_area_topdown(info); 1897} 1898 1899/* truncate.c */ 1900extern void truncate_inode_pages(struct address_space *, loff_t); 1901extern void truncate_inode_pages_range(struct address_space *, 1902 loff_t lstart, loff_t lend); 1903extern void truncate_inode_pages_final(struct address_space *); 1904 1905/* generic vm_area_ops exported for stackable file systems */ 1906extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); 1907extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf); 1908extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 1909 1910/* mm/page-writeback.c */ 1911int write_one_page(struct page *page, int wait); 1912void task_dirty_inc(struct task_struct *tsk); 1913 1914/* readahead.c */ 1915#define VM_MAX_READAHEAD 128 /* kbytes */ 1916#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ 1917 1918int force_page_cache_readahead(struct address_space *mapping, struct file *filp, 1919 pgoff_t offset, unsigned long nr_to_read); 1920 1921void page_cache_sync_readahead(struct address_space *mapping, 1922 struct file_ra_state *ra, 1923 struct file *filp, 1924 pgoff_t offset, 1925 unsigned long size); 1926 1927void page_cache_async_readahead(struct address_space *mapping, 1928 struct file_ra_state *ra, 1929 struct file *filp, 1930 struct page *pg, 1931 pgoff_t offset, 1932 unsigned long size); 1933 1934unsigned long max_sane_readahead(unsigned long nr); 1935 1936/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ 1937extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 1938 1939/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ 1940extern int expand_downwards(struct vm_area_struct *vma, 1941 unsigned long address); 1942#if VM_GROWSUP 1943extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1944#else 1945 #define expand_upwards(vma, address) do { } while (0) 1946#endif 1947 1948/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 1949extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 1950extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, 1951 struct vm_area_struct **pprev); 1952 1953/* Look up the first VMA which intersects the interval start_addr..end_addr-1, 1954 NULL if none. Assume start_addr < end_addr. */ 1955static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) 1956{ 1957 struct vm_area_struct * vma = find_vma(mm,start_addr); 1958 1959 if (vma && end_addr <= vma->vm_start) 1960 vma = NULL; 1961 return vma; 1962} 1963 1964static inline unsigned long vma_pages(struct vm_area_struct *vma) 1965{ 1966 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 1967} 1968 1969/* Look up the first VMA which exactly matches the interval vm_start ...
vm_end */ 1970static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, 1971 unsigned long vm_start, unsigned long vm_end) 1972{ 1973 struct vm_area_struct *vma = find_vma(mm, vm_start); 1974 1975 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) 1976 vma = NULL; 1977 1978 return vma; 1979} 1980 1981#ifdef CONFIG_MMU 1982pgprot_t vm_get_page_prot(unsigned long vm_flags); 1983void vma_set_page_prot(struct vm_area_struct *vma); 1984#else 1985static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) 1986{ 1987 return __pgprot(0); 1988} 1989static inline void vma_set_page_prot(struct vm_area_struct *vma) 1990{ 1991 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 1992} 1993#endif 1994 1995#ifdef CONFIG_NUMA_BALANCING 1996unsigned long change_prot_numa(struct vm_area_struct *vma, 1997 unsigned long start, unsigned long end); 1998#endif 1999 2000struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); 2001int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 2002 unsigned long pfn, unsigned long size, pgprot_t); 2003int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 2004int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2005 unsigned long pfn); 2006int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2007 unsigned long pfn); 2008int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); 2009 2010 2011struct page *follow_page_mask(struct vm_area_struct *vma, 2012 unsigned long address, unsigned int foll_flags, 2013 unsigned int *page_mask); 2014 2015static inline struct page *follow_page(struct vm_area_struct *vma, 2016 unsigned long address, unsigned int foll_flags) 2017{ 2018 unsigned int unused_page_mask; 2019 return follow_page_mask(vma, address, foll_flags, &unused_page_mask); 2020} 2021 2022#define FOLL_WRITE 0x01 /* check pte is writable */ 2023#define FOLL_TOUCH 0x02 /* mark page accessed */ 2024#define FOLL_GET 0x04 /* do get_page on page */ 2025#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ 2026#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ 2027#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO 2028 * and return without waiting upon it */ 2029#define FOLL_MLOCK 0x40 /* mark page as mlocked */ 2030#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ 2031#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 2032#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ 2033#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 2034#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ 2035 2036typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2037 void *data); 2038extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, 2039 unsigned long size, pte_fn_t fn, void *data); 2040 2041#ifdef CONFIG_PROC_FS 2042void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); 2043#else 2044static inline void vm_stat_account(struct mm_struct *mm, 2045 unsigned long flags, struct file *file, long pages) 2046{ 2047 mm->total_vm += pages; 2048} 2049#endif /* CONFIG_PROC_FS */ 2050 2051#ifdef CONFIG_DEBUG_PAGEALLOC 2052extern void kernel_map_pages(struct page *page, int numpages, int enable); 2053#ifdef CONFIG_HIBERNATION 2054extern bool kernel_page_present(struct page *page); 2055#endif /* CONFIG_HIBERNATION */ 2056#else 2057static inline void 
2058kernel_map_pages(struct page *page, int numpages, int enable) {} 2059#ifdef CONFIG_HIBERNATION 2060static inline bool kernel_page_present(struct page *page) { return true; } 2061#endif /* CONFIG_HIBERNATION */ 2062#endif 2063 2064#ifdef __HAVE_ARCH_GATE_AREA 2065extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); 2066extern int in_gate_area_no_mm(unsigned long addr); 2067extern int in_gate_area(struct mm_struct *mm, unsigned long addr); 2068#else 2069static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 2070{ 2071 return NULL; 2072} 2073static inline int in_gate_area_no_mm(unsigned long addr) { return 0; } 2074static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) 2075{ 2076 return 0; 2077} 2078#endif /* __HAVE_ARCH_GATE_AREA */ 2079 2080#ifdef CONFIG_SYSCTL 2081extern int sysctl_drop_caches; 2082int drop_caches_sysctl_handler(struct ctl_table *, int, 2083 void __user *, size_t *, loff_t *); 2084#endif 2085 2086unsigned long shrink_slab(struct shrink_control *shrink, 2087 unsigned long nr_pages_scanned, 2088 unsigned long lru_pages); 2089 2090#ifndef CONFIG_MMU 2091#define randomize_va_space 0 2092#else 2093extern int randomize_va_space; 2094#endif 2095 2096const char * arch_vma_name(struct vm_area_struct *vma); 2097void print_vma_addr(char *prefix, unsigned long rip); 2098 2099void sparse_mem_maps_populate_node(struct page **map_map, 2100 unsigned long pnum_begin, 2101 unsigned long pnum_end, 2102 unsigned long map_count, 2103 int nodeid); 2104 2105struct page *sparse_mem_map_populate(unsigned long pnum, int nid); 2106pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); 2107pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); 2108pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); 2109pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); 2110void *vmemmap_alloc_block(unsigned long size, int node); 2111void *vmemmap_alloc_block_buf(unsigned long size, int node); 2112void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); 2113int vmemmap_populate_basepages(unsigned long start, unsigned long end, 2114 int node); 2115int vmemmap_populate(unsigned long start, unsigned long end, int node); 2116void vmemmap_populate_print_last(void); 2117#ifdef CONFIG_MEMORY_HOTPLUG 2118void vmemmap_free(unsigned long start, unsigned long end); 2119#endif 2120void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, 2121 unsigned long size); 2122 2123enum mf_flags { 2124 MF_COUNT_INCREASED = 1 << 0, 2125 MF_ACTION_REQUIRED = 1 << 1, 2126 MF_MUST_KILL = 1 << 2, 2127 MF_SOFT_OFFLINE = 1 << 3, 2128}; 2129extern int memory_failure(unsigned long pfn, int trapno, int flags); 2130extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2131extern int unpoison_memory(unsigned long pfn); 2132extern int sysctl_memory_failure_early_kill; 2133extern int sysctl_memory_failure_recovery; 2134extern void shake_page(struct page *p, int access); 2135extern atomic_long_t num_poisoned_pages; 2136extern int soft_offline_page(struct page *page, int flags); 2137 2138#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 2139extern void clear_huge_page(struct page *page, 2140 unsigned long addr, 2141 unsigned int pages_per_huge_page); 2142extern void copy_user_huge_page(struct page *dst, struct page *src, 2143 unsigned long addr, struct vm_area_struct *vma, 2144 unsigned int pages_per_huge_page); 2145#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 
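/*
 * Illustrative use of the huge page clear/copy helpers declared above (a
 * sketch of the typical pattern, not a contract of this header): a
 * transparent huge page fault clears the freshly allocated huge page before
 * mapping it, and a huge-page copy-on-write duplicates the source in
 * page-sized chunks, e.g.
 *
 *	clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
 *	copy_user_huge_page(new_page, old_page, haddr, vma, HPAGE_PMD_NR);
 *
 * where new_page, old_page, haddr and vma are the fault handler's locals.
 */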
2146 2147#ifdef CONFIG_DEBUG_PAGEALLOC 2148extern unsigned int _debug_guardpage_minorder; 2149 2150static inline unsigned int debug_guardpage_minorder(void) 2151{ 2152 return _debug_guardpage_minorder; 2153} 2154 2155static inline bool page_is_guard(struct page *page) 2156{ 2157 return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); 2158} 2159#else 2160static inline unsigned int debug_guardpage_minorder(void) { return 0; } 2161static inline bool page_is_guard(struct page *page) { return false; } 2162#endif /* CONFIG_DEBUG_PAGEALLOC */ 2163 2164#if MAX_NUMNODES > 1 2165void __init setup_nr_node_ids(void); 2166#else 2167static inline void setup_nr_node_ids(void) {} 2168#endif 2169 2170#endif /* __KERNEL__ */ 2171#endif /* _LINUX_MM_H */ 2172