truncate.c revision 83f786680aec8d030184f7ced1a0a3dd8ac81764
/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around.  It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all.  However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM.  Can you say "ext3 is horribly ugly"?  Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;

		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
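
/*
 * A minimal sketch of how a block-backed filesystem might wire up the
 * ->invalidatepage hook that do_invalidatepage() above dispatches to.
 * "examplefs" and examplefs_invalidatepage() are hypothetical names;
 * block_invalidatepage() is the real buffer_head-based fallback that
 * do_invalidatepage() uses when the hook is left NULL under CONFIG_BLOCK.
 */
#if 0	/* illustrative sketch only, not built as part of mm/truncate.c */
static void examplefs_invalidatepage(struct page *page, unsigned long offset)
{
	/* filesystem-private bookkeeping (journal credits etc.) goes here */
	block_invalidatepage(page, offset);
}

static const struct address_space_operations examplefs_aops = {
	.invalidatepage	= examplefs_invalidatepage,
	/* .readpage, .writepage, ... omitted for brevity */
};
#endif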
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_CACHE_SHIFT,
				    PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.  The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
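
/*
 * A minimal usage sketch for invalidate_inode_page(): the caller must
 * hold its own reference and take the page lock before calling it.
 * "mapping" and "index" are assumed in-scope, illustrative variables.
 */
#if 0	/* illustrative sketch only, not built as part of mm/truncate.c */
	struct page *page = find_get_page(mapping, index);

	if (page) {
		if (trylock_page(page)) {
			/* 1 if the clean, unused page was dropped, else 0 */
			int invalidated = invalidate_inode_page(page);

			unlock_page(page);
		}
		page_cache_release(page);
	}
#endif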
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);

		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
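
/*
 * A minimal sketch of the canonical truncate_inode_pages() caller
 * pattern, loosely modeled on vmtruncate(): shrink i_size first, then
 * evict the now-stale pagecache beyond the new EOF.  examplefs_setsize()
 * is a hypothetical helper; its caller is assumed to hold inode->i_mutex,
 * as the kernel-doc above requires.
 */
#if 0	/* illustrative sketch only, not built as part of mm/truncate.c */
static void examplefs_setsize(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
	truncate_inode_pages(inode->i_mapping, newsize);
}
#endif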
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
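
/*
 * A minimal usage sketch for invalidate_mapping_pages() above, in the
 * style of the drop_caches code: best-effort eviction of every clean,
 * unmapped pagecache page of one inode.  Dirty, locked, mapped and
 * writeback pages are silently skipped; "inode" is an assumed in-scope
 * variable.
 */
#if 0	/* illustrative sketch only, not built as part of mm/truncate.c */
	unsigned long nr_dropped;

	nr_dropped = invalidate_mapping_pages(inode->i_mapping, 0, (pgoff_t)-1);
#endif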
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					    (loff_t)page_index << PAGE_CACHE_SHIFT,
					    (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page.
					 */
					unmap_mapping_range(mapping,
					    (loff_t)page_index << PAGE_CACHE_SHIFT,
					    PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
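
/*
 * A minimal sketch of the classic invalidate_inode_pages2_range() caller
 * pattern, as in the generic direct-I/O write path: after data has gone
 * to disk behind the pagecache's back, invalidate the written byte range
 * so subsequent buffered reads see the new contents.  The helper name is
 * hypothetical; "pos" and "count" are illustrative byte offsets.
 */
#if 0	/* illustrative sketch only, not built as part of mm/truncate.c */
static int examplefs_invalidate_after_dio(struct inode *inode,
					  loff_t pos, size_t count)
{
	return invalidate_inode_pages2_range(inode->i_mapping,
			pos >> PAGE_CACHE_SHIFT,
			(pos + count - 1) >> PAGE_CACHE_SHIFT);
}
#endif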