readahead.c revision 6b10c6c9fbfe754e8482efb8c8b84f8e40c0f2eb
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

/*
 * Convenient macros for min/max read-ahead pages.
 * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
 * The latter is necessary for systems with large page sizes (i.e. 64k).
 */
#define MAX_RA_PAGES	(VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
#define MIN_RA_PAGES	DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= MAX_RA_PAGES,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			put_pages_list(pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
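
/*
 * Illustrative sketch only (not part of this file): a filesystem's
 * ->readpages() method would typically hand its page list to
 * read_cache_pages() together with a filler callback that issues the read
 * for one page.  The names example_fill_page() and example_readpages()
 * below are hypothetical; real filesystems usually supply their own
 * asynchronous filler rather than calling ->readpage() directly.
 */
#if 0
static int example_fill_page(void *data, struct page *page)
{
	struct file *filp = data;

	/* page->mapping is valid here: read_cache_pages() has already
	 * added the page to the page cache before calling the filler. */
	return page->mapping->a_ops->readpage(filp, page);
}

static int example_readpages(struct file *filp, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	/* read_cache_pages() inserts each page into the page cache and LRU,
	 * calls the filler, and drops the remaining pages on error. */
	return read_cache_pages(mapping, pages, example_fill_page, filp);
}
#endif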

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up (x 4 for small requests, x 2 for medium ones), capping
 * the result at the maximum readahead size.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
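
/*
 * Worked example (illustrative, assuming 4k pages and the default 128k
 * maximum, i.e. max = 32 pages): a sequential reader issuing 1-page reads
 * gets an initial window of get_init_ra_size(1, 32) = 4 pages, which then
 * ramps up through get_next_ra_size() as 4 -> 8 -> 16 -> 32 and stays
 * capped at 32 pages thereafter.
 */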

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator.  The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	int	max = ra->ra_pages;	/* max readahead pages */
	pgoff_t prev_offset;
	int	sequential;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
			offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
	sequential = offset - prev_offset <= 1UL || req_size > max;

	/*
	 * Standalone, small read.
	 * Read as is, and do not pollute the readahead state.
	 */
	if (!hit_readahead_marker && !sequential) {
		return __do_page_cache_readahead(mapping, filp,
						offset, req_size, 0);
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		read_lock_irq(&mapping->tree_lock);
		start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
		read_unlock_irq(&mapping->tree_lock);

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * It may be one of
	 *	- first read on start of file
	 *	- sequential cache miss
	 *	- oversize random read
	 * Start readahead for it.
	 */
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag: this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
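
/*
 * Illustrative sketch only (not part of this file): how a buffered read path
 * is expected to drive the two entry points above.  A caller such as the
 * generic file read loop issues page_cache_sync_readahead() on a cache miss
 * and page_cache_async_readahead() when it finds a page carrying the
 * PG_readahead marker.  example_read_hint() below is a hypothetical helper.
 */
#if 0
static void example_read_hint(struct address_space *mapping,
			      struct file_ra_state *ra, struct file *filp,
			      pgoff_t index, unsigned long nr_pages)
{
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		/* Cache miss: start synchronous readahead, then the caller
		 * would retry the page cache lookup. */
		page_cache_sync_readahead(mapping, ra, filp, index, nr_pages);
		return;
	}
	if (PageReadahead(page)) {
		/* Hit the marker page: top up the window asynchronously. */
		page_cache_async_readahead(mapping, ra, filp, page,
					   index, nr_pages);
	}
	page_cache_release(page);
}
#endif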