swap_state.c revision e767e0561d7fd2333df1921f1ab4176211f9036b
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
        .unplug_io_fn   = swap_unplug_io_fn,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages);
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageSwapCache(page));
        VM_BUG_ON(!PageSwapBacked(page));

        error = radix_tree_preload(gfp_mask);
        if (!error) {
                page_cache_get(page);
                SetPageSwapCache(page);
                set_page_private(page, entry.val);

                spin_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
                if (likely(!error)) {
                        total_swapcache_pages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
                        INC_CACHE_INFO(add_total);
                }
                spin_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();

                if (unlikely(error)) {
                        set_page_private(page, 0UL);
                        ClearPageSwapCache(page);
                        page_cache_release(page);
                }
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapCache(page));
        VM_BUG_ON(PageWriteback(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageUptodate(page));

        for (;;) {
                entry = get_swap_page();
                if (!entry.val)
                        return 0;

                /*
                 * Radix-tree node allocations from PF_MEMALLOC contexts could
                 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
                 * stops emergency reserves from being allocated.
                 *
                 * TODO: this could cause a theoretical memory reclaim
                 * deadlock in the swap out path.
                 */
                /*
                 * Add it to the swap cache and mark it dirty
                 */
                err = add_to_swap_cache(page, entry,
                                __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

                switch (err) {
                case 0:                         /* Success */
                        SetPageDirty(page);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
                        swap_free(entry);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
                        swap_free(entry);
                        return 0;
                }
        }
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        mem_cgroup_uncharge_swapcache(page, entry);
        swap_free(entry);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                if (!swap_duplicate(entry))
                        break;

                /*
                 * Associate the page with swap entry in the swap cache.
                 * May fail (-EEXIST) if there is already a page associated
                 * with this entry in the swap cache: added by a racing
                 * read_swap_cache_async, or add_to_swap or shmem_writepage
                 * re-using the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                if (likely(!err)) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                swap_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        int nr_pages;
        struct page *page;
        unsigned long offset;
        unsigned long end_offset;

        /*
         * Get starting offset for readaround, and number of pages to read.
         * Adjust starting address by readbehind (for NUMA interleave case)?
         * No, it's very unlikely that swap layout would follow vma layout,
         * more likely that neighbouring swap pages came from the same node:
         * so use the same "addr" to choose the same node for each swap read.
         */
        nr_pages = valid_swaphandles(entry, &offset);
        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
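
For context, the usual consumer of lookup_swap_cache() and swapin_readahead() is the page-fault path in mm/memory.c. The fragment below is an illustrative sketch only, not part of swap_state.c: it shows roughly how a do_swap_page()-style caller probes the swap cache before falling back to readahead. The helper name swapin_sketch is invented for illustration, and the pte handling, page locking, memcg charging and error paths of the real fault handler are deliberately omitted.

/*
 * Illustrative sketch (not from swap_state.c): resolve a swap pte to a
 * page.  Assumes the caller holds down_read(&mm->mmap_sem), as
 * swapin_readahead() requires when vma is non-NULL.
 */
static struct page *swapin_sketch(swp_entry_t entry,
                                  struct vm_area_struct *vma,
                                  unsigned long address)
{
        struct page *page;

        /*
         * Fast path: the page may already be in the swap cache; a found
         * page comes back with its refcount raised by find_get_page().
         */
        page = lookup_swap_cache(entry);
        if (page)
                return page;

        /*
         * Slow path: allocate a page and read it (plus an aligned cluster
         * of neighbours) from the swap device.  Returns NULL if allocation
         * fails or the swap entry was freed in the meantime.
         */
        return swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, address);
}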