readahead.c revision d41cc702cc4ba3782ebe3b2e189633607d5ccd6a
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

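/*
 * With the usual VM_MAX_READAHEAD of 128 kbytes and 4 kbyte pages the
 * expression below gives a default ra_pages of 32 pages per backing
 * device; individual devices may override this in their own
 * backing_dev_info.
 */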
struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_page = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * Return max readahead size for this inode in number-of-pages.
 */
static inline unsigned long get_max_readahead(struct file_ra_state *ra)
{
	return ra->ra_pages;
}

static inline unsigned long get_min_readahead(struct file_ra_state *ra)
{
	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}

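/*
 * Turn readahead off: forget both the current and the ahead window and
 * clear the flags.  prev_page is left alone so that a later sequential
 * access can still be detected and readahead restarted.
 */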
static inline void ra_off(struct file_ra_state *ra)
{
	ra->start = 0;
	ra->flags = 0;
	ra->size = 0;
	ra->ahead_start = 0;
	ra->ahead_size = 0;
	return;
}

/*
 * Set the initial window size: round the request up to the next power of
 * two, then square it while it is small (<= max/64); use max/4 for medium
 * requests and max for large ones.
 * For the default 128k (32 page) max ra this gives:
 * 1-8 page read = 32k initial window, > 8 page read = 128k initial window.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 64)
		newsize = newsize * newsize;
	else if (newsize <= max / 4)
		newsize = max / 4;
	else
		newsize = max;
	return newsize;
}
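
/*
 * Worked example with the default 32 page (128k) maximum: a 3 page read
 * rounds up to 4, which is <= max/4, so the initial window is max/4 = 8
 * pages (32k); a 16 page read exceeds max/4 and gets the full 32 pages.
 * The squaring branch only matters for devices whose max is at least 64
 * pages.
 */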

/*
 * Set the new window size.  This is called only when I/O is to be submitted,
 * not for each call to readahead.  If a cache miss occurred, reduce the next
 * I/O size, otherwise increase it depending on how close to max we are.
 */
static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
{
	unsigned long max = get_max_readahead(ra);
	unsigned long min = get_min_readahead(ra);
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (ra->flags & RA_FLAG_MISS) {
		ra->flags &= ~RA_FLAG_MISS;
		newsize = max((cur - 2), min);
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return min(newsize, max);
}
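
/*
 * Worked example with a 32 page maximum (4k pages): a current window of 8
 * pages grows to 16, then to 32, where it stays; only a window smaller than
 * max/16 (a 1 page window here) is quadrupled.  After a readahead miss the
 * next window shrinks by two pages, but never below the 4 page (16k)
 * minimum.
 */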

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages, and
 * 			start reads against them.
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			while (!list_empty(pages)) {
				struct page *victim;

				victim = list_to_page(pages);
				list_del(&victim->lru);
				page_cache_release(victim);
			}
			break;
		}
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
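
/*
 * A filesystem's ->readpages() typically wraps this; roughly (a sketch,
 * not taken from any particular filesystem):
 *
 *	static int myfs_readpages(struct file *file,
 *				  struct address_space *mapping,
 *				  struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages,
 *					myfs_readpage_filler, file);
 *	}
 *
 * where the filler reads one page and arranges for it to be unlocked, just
 * as ->readpage() would.
 */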
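/*
 * Start I/O on the pages that __do_page_cache_readahead() has allocated.
 * If the address_space provides ->readpages() the whole list is handed
 * over in one call; otherwise each page is added to the page cache and
 * read with ->readpage(), dropping pages which are already present.
 */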
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			ret = mapping->a_ops->readpage(filp, page);
			if (ret != AOP_TRUNCATED_PAGE) {
				if (!pagevec_add(&lru_pvec, page))
					__pagevec_lru_add(&lru_pvec);
				continue;
			} /* else fall through to release */
		}
		page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}

/*
 * Readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 * start:	Page index at which we started the readahead
 * size:	Number of pages in that read
 *              Together, start and size form the "current window".
 * prev_page:   The page which the readahead algorithm most-recently inspected.
 *              It is mainly used to detect sequential file reading.
 *              If page_cache_readahead sees that it is again being called for
 *              a page which it just looked at, it can return immediately without
 *              making any state changes.
 * ahead_start,
 * ahead_size:  Together, these form the "ahead window".
 * ra_pages:	The externally controlled max readahead for this fd.
 *
 * When readahead is in the off state (size == 0), readahead is disabled.
 * In this state, prev_page is used to detect the resumption of sequential I/O.
 *
 * The readahead code manages two windows - the "current" and the "ahead"
 * windows.  The intent is that while the application is walking the pages
 * in the current window, I/O is underway on the ahead window.  When the
 * current window is fully traversed, it is replaced by the ahead window
 * and the ahead window is invalidated.  When this copying happens, the
 * new current window's pages are probably still locked.  So
 * we submit a new batch of I/O immediately, creating a new ahead window.
 *
 * So:
 *
 *   ----|----------------|----------------|-----
 *       ^start           ^start+size
 *                        ^ahead_start     ^ahead_start+ahead_size
 *
 *         ^ When this page is read, we submit I/O for the
 *           ahead window.
 *
 * A `readahead hit' occurs when a read request is made against a page which is
 * the next sequential page. Ahead window calculations are done only when it
 * is time to submit a new IO.  The code ramps up the size aggressively at
 * first, but slows down as it approaches max_readahead.
 *
 * Any seek/random IO will result in readahead being turned off.  It will resume
 * at the first sequential access.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * page_cache_readahead() is to be called for every read request, rather than
 * when it is time to perform readahead.  It is called only once for the entire
 * I/O regardless of size unless readahead is unable to start enough I/O to
 * satisfy the request (I/O request > max_readahead).
 */
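
/*
 * Example of the expected progression for a sequential reader issuing 16k
 * (4 page) reads against a file, with the default 32 page max readahead:
 * the first readahead call at offset 0 sets up a current window of 8 pages
 * (pages 0-7) and submits I/O for it; the next call, at page 4, opens an
 * ahead window of 16 pages (pages 8-23); once the reader crosses into page
 * 8 the ahead window becomes the current window and a new 32 page ahead
 * window (pages 24-55) is submitted, and so on at full size.
 */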

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	read_lock_irq(&mapping->tree_lock);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		if (page)
			continue;

		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
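/*
 * With the common 4k PAGE_CACHE_SIZE a chunk works out to 512 pages per
 * __do_page_cache_readahead() call.
 */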
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}

/*
 * Check how effective readahead is being.  If the amount of started IO is
 * less than expected then the file is partly or fully in pagecache and
 * readahead isn't helping.
 */
static inline int check_ra_success(struct file_ra_state *ra,
			unsigned long nr_to_read, unsigned long actual)
{
	if (actual == 0) {
		ra->cache_hit += nr_to_read;
		if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
			ra_off(ra);
			ra->flags |= RA_FLAG_INCACHE;
			return 0;
		}
	} else {
		ra->cache_hit = 0;
	}
	return 1;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}

/*
 * Read 'nr_to_read' pages starting at page 'offset'.  If the flag 'block'
 * is set, wait until the read completes.  Otherwise attempt to read without
 * blocking.
 * Returns 1, meaning 'success', if the read succeeds without switching off
 * readahead mode.  Otherwise returns failure.
 */
static int
blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			struct file_ra_state *ra, int block)
{
	int actual;

	if (!block && bdi_read_congested(mapping->backing_dev_info))
		return 0;

	actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);

	return check_ra_success(ra, nr_to_read, actual);
}

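/*
 * Open a new ahead window starting where the current window ends.  The
 * readahead is submitted in blocking mode if 'force' is set or if the
 * reader has already overtaken the new window; otherwise congestion may
 * cause it to be skipped, in which case the ahead window is reset so a
 * later call can try again.
 */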
static int make_ahead_window(struct address_space *mapping, struct file *filp,
				struct file_ra_state *ra, int force)
{
	int block, ret;

	ra->ahead_size = get_next_ra_size(ra);
	ra->ahead_start = ra->start + ra->size;

	block = force || (ra->prev_page >= ra->ahead_start);
	ret = blockable_page_cache_readahead(mapping, filp,
			ra->ahead_start, ra->ahead_size, ra, block);

	if (!ret && !force) {
		/* A read failure in blocking mode implies the pages are
		 * all cached, so we can safely assume we have taken
		 * care of all the pages requested in this call.
		 * A read failure in non-blocking mode implies we are
		 * reading more pages than requested in this call, so
		 * we can safely assume we have taken care of all the
		 * pages requested in this call.
		 *
		 * Just reset the ahead window in case we failed due to
		 * congestion.  The ahead window will in any case be closed
		 * in case we failed due to excessive page cache hits.
		 */
		ra->ahead_start = 0;
		ra->ahead_size = 0;
	}

	return ret;
}

/**
 * page_cache_readahead - generic adaptive readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
 * @req_size: hint: total size of the read which the caller is performing in
 *            PAGE_CACHE_SIZE units
 *
 * page_cache_readahead() is the main function.  It performs the adaptive
 * readahead window size management and submits the readahead I/O.
 *
 * Note that @filp is purely used for passing on to the ->readpage[s]()
 * handler: it may refer to a different file from @mapping (so we may not use
 * @filp->f_mapping or @filp->f_dentry->d_inode here).
 * Also, @ra may not be equal to &@filp->f_ra.
 *
 * Returns ra->prev_page + 1: one past the last page this call has taken
 * into account.
 */
unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
		     struct file *filp, pgoff_t offset, unsigned long req_size)
{
	unsigned long max, newsize;
	int sequential;

	/*
	 * If this is a repeat inspection of the page we looked at last time,
	 * skip past it.  We avoid doing extra work and bogusly perturbing
	 * the readahead window expansion logic.
	 */
	if (offset == ra->prev_page && --req_size)
		++offset;

	/* Note that prev_page == -1 if it is a first read */
	sequential = (offset == ra->prev_page + 1);
	ra->prev_page = offset;

	max = get_max_readahead(ra);
	newsize = min(req_size, max);

	/* No readahead or sub-page sized read or file already in cache */
	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
		goto out;

	ra->prev_page += newsize - 1;

	/*
	 * Special case - first read at start of file, or the first
	 * sequential read after readahead was switched off.  We'll assume
	 * it's a whole-file read and grow the window fast.
	 */
	if (sequential && ra->size == 0) {
		ra->size = get_init_ra_size(newsize, max);
		ra->start = offset;
		if (!blockable_page_cache_readahead(mapping, filp, offset,
							 ra->size, ra, 1))
			goto out;

		/*
		 * If the request size is larger than our max readahead, we
		 * at least want to be sure that we get 2 IOs in flight and
		 * we know that we will definitely need the new I/O.
		 * Once we do this, subsequent calls should be able to
		 * overlap IOs, thus preventing stalls.  So issue the ahead
		 * window immediately.
		 */
		if (req_size >= max)
			make_ahead_window(mapping, filp, ra, 1);

		goto out;
	}

	/*
	 * Now handle the random case:
	 * partial page reads and first access were handled above, so if
	 * this is not the next sequential page it must be random IO.
	 */
	if (!sequential) {
		ra_off(ra);
		blockable_page_cache_readahead(mapping, filp, offset,
				 newsize, ra, 1);
		goto out;
	}

	/*
	 * If we get here we are doing sequential IO and this was not the
	 * first occurrence (i.e. we have an existing window)
	 */

	if (ra->ahead_start == 0) {	 /* no ahead window yet */
		if (!make_ahead_window(mapping, filp, ra, 0))
			goto out;
	}
	/*
	 * Already have an ahead window, check if we crossed into it.
	 * If so, shift windows and issue a new ahead window.
	 * Only return the #pages that are in the current window, so that
	 * we get called back on the first page of the ahead window which
	 * will allow us to submit more IO.
	 */
	if (ra->prev_page >= ra->ahead_start) {
		ra->start = ra->ahead_start;
		ra->size = ra->ahead_size;
		make_ahead_window(mapping, filp, ra, 0);
	}

out:
	return ra->prev_page + 1;
}

/*
 * handle_ra_miss() is called when it is known that a page which should have
 * been present in the pagecache (we just did some readahead there) was in fact
 * not found.  This will happen if it was evicted by the VM (readahead
 * thrashing).
 *
 * Turn on the cache miss flag in the RA struct; this will cause the RA code
 * to reduce the RA size on the next read.
 */
void handle_ra_miss(struct address_space *mapping,
		struct file_ra_state *ra, pgoff_t offset)
{
	ra->flags |= RA_FLAG_MISS;
	ra->flags &= ~RA_FLAG_INCACHE;
	ra->cache_hit = 0;
}
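
/*
 * Sketch of how a caller such as do_generic_mapping_read() in mm/filemap.c
 * is expected to drive this interface (illustrative, not a verbatim copy):
 *
 *	if (index == next_index)
 *		next_index = page_cache_readahead(mapping, &ra, filp,
 *					index, last_index - index);
 *	page = find_get_page(mapping, index);
 *	if (unlikely(page == NULL)) {
 *		handle_ra_miss(mapping, &ra, index);
 *		goto no_cached_page;
 *	}
 */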

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	__get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
	return min(nr, (inactive + free) / 2);
}
