cfq-iosched.c revision b0b78f81a5a3bfe9442fcc3a9c13e298a742556a
/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
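/* fifo expiry per queue type: [0] async (HZ/4 = 250ms), [1] sync (HZ/8 = 125ms) */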
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)

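/* per-request elevator private data, set up in cfq_set_request() */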
#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

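/*
 * The thinktime and seek statistics are EWMAs whose sample counters
 * converge toward 256, so requiring more than 80 samples roughly means a
 * few requests' worth of history has accumulated before we trust the mean.
 */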
#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;
	/*
	 * Used to track any pending rt requests so we can pre-empt current
	 * non-RT cfqq in service when this value is non-zero.
	 */
	unsigned int busy_rt_queues;

	int rq_in_driver;
	int sync_flight;

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	int hw_tag_samples;
	int rq_in_driver_peak;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;
	unsigned int slice_dispatch;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

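/*
 * The macro above expands, for each flag below, into cfq_mark_cfqq_<name>(),
 * cfq_clear_cfqq_<name>() and cfq_cfqq_<name>() set/clear/test helpers.
 */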
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
						struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}

/*
 * Scheduler run of the queue, if there are requests pending and no one in
 * the driver will restart queueing.
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

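	/*
	 * Each priority step is worth base_slice/CFQ_SLICE_SCALE, and the
	 * default prio 4 gets exactly base_slice: with the default 100ms
	 * sync slice that is 180ms for prio 0 down to 40ms for prio 7.
	 */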
	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

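/*
 * Roughly: a queue's initial distance from the front of the service tree
 * grows with the number of other busy queues and shrinks as the queue's
 * own slice gets longer, so higher priority queues sort closer to the
 * front.
 */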
static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				    struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	int left;

	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * Sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back.
		 * After that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_class_rt(cfqq))
		cfqd->busy_rt_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_class_rt(cfqq))
		cfqd->busy_rt_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active");
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		del_timer(&cfqd->idle_slice_timer);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
		cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}

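/* "seeky" here means a mean seek distance above 8*1024 sectors (4MB) */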
#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * still requests with the driver, don't idle
	 */
	if (cfqd->rq_in_driver)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. So allow a little bit of time for it to submit a new rq.
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_log(cfqd, "arm_idle: %lu", sl);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		rq = NULL;

	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

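	/*
	 * Scales linearly with priority: with the default base of 2, this
	 * allows 32 requests per slice at ioprio 0 down to 4 at ioprio 7.
	 */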
	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
		goto expire;

	/*
	 * If we have a RT cfqq waiting, then we pre-empt the current non-rt
	 * cfqq.
	 */
	if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
		/*
		 * We simulate this as cfqq timed out so that it gets to bank
		 * the remainder of its time slice.
		 */
		cfq_log_cfqq(cfqd, cfqq, "preempt");
		cfq_slice_expired(cfqd, 1);
		goto new_queue;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
	return dispatched;
}

/*
 * Dispatch a request from cfqq, moving it to the request queue
 * dispatch list.
 */
static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_context *cic = RQ_CIC(rq);

		atomic_inc(&cic->ioc->refcount);
		cfqd->active_cic = cic;
	}
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	unsigned int max_dispatch;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
		return 0;

	max_dispatch = cfqd->cfq_quantum;
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return 0;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1)
			return 0;

		/*
		 * we are the only queue, allow up to 4 times the 'quantum'
		 */
		if (cfqq->dispatched >= 4 * max_dispatch)
			return 0;
	}

	/*
	 * Dispatch a request from this cfqq
	 */
	cfq_dispatch_request(cfqd, cfqq);
	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queues always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log(cfqd, "dispatched a request");
	return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}

/*
 * Must always be called with the rcu_read_lock() held
 */
static void
__call_for_each_cic(struct io_context *ioc,
		    void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
		func(ioc, cic);
}

/*
 * Call func for each cic attached to this ioc.
 */
static void
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	rcu_read_lock();
	__call_for_each_cic(ioc, func);
	rcu_read_unlock();
}

static void cfq_cic_free_rcu(struct rcu_head *head)
{
	struct cfq_io_context *cic;

	cic = container_of(head, struct cfq_io_context, rcu_head);

	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone) {
		/*
		 * CFQ scheduler is exiting, grab exit lock and check
		 * the pending io context count. If it hits zero,
		 * complete ioc_gone and set it back to NULL
		 */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long flags;

	BUG_ON(!cic->dead_key);

	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, cic->dead_key);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

/*
 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
 * There are only two callers of this: ->dtor(), which is called with the
 * rcu_read_lock() held, and ->trim(), which is called with the task lock held.
 */
static void cfq_free_io_context(struct io_context *ioc)
{
	/*
	 * ioc->refcount is zero here, or we are called from elv_unregister(),
	 * so no more cic's are allowed to be linked into this ioc.  So it
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
	__call_for_each_cic(ioc, cic_free_func);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure key == NULL is seen for dead queues
	 */
	smp_wmb();
	cic->dead_key = (unsigned long) cic->key;
	cic->key = NULL;

	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);

		/*
		 * Ensure we get a fresh copy of the ->key to prevent
		 * race between exiting task and queue
		 */
		smp_read_barrier_depends();
		if (cic->key)
			__cfq_exit_single_io_context(cfqd, cic);

		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	call_for_each_cic(ioc, cfq_exit_single_io_context);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
							cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	call_for_each_cic(ioc, changed_ioprio);
	ioc->ioprio_changed = 0;
}

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;

retry:
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			if (!cfqq)
				goto out;
		}

		RB_CLEAR_NODE(&cfqq->rb_node);
		INIT_LIST_HEAD(&cfqq->fifo);

		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;

		cfq_mark_cfqq_prio_changed(cfqq);

		cfq_init_prio_data(cfqq, ioc);

		if (is_sync) {
			if (!cfq_class_idle(cfqq))
				cfq_mark_cfqq_idle_window(cfqq);
			cfq_mark_cfqq_sync(cfqq);
		}
		cfqq->pid = current->pid;
		cfq_log_cfqq(cfqd, cfqq, "alloced");
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

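/*
 * Async queues are shared per (ioprio class, ioprio level) across all
 * processes; only sync queues are per-process.
 */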
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq) {
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
		if (!cfqq)
			return NULL;
	}

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		atomic_inc(&cfqq->ref);
		*async_cfqq = cfqq;
	}

	atomic_inc(&cfqq->ref);
	return cfqq;
}

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
		  struct cfq_io_context *cic)
{
	unsigned long flags;

	WARN_ON(!list_empty(&cic->queue_list));

	spin_lock_irqsave(&ioc->lock, flags);

	BUG_ON(ioc->ioc_data == cic);

	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

static struct cfq_io_context *
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct cfq_io_context *cic;
	unsigned long flags;
	void *k;

	if (unlikely(!ioc))
		return NULL;

	rcu_read_lock();

	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	cic = rcu_dereference(ioc->ioc_data);
	if (cic && cic->key == cfqd) {
		rcu_read_unlock();
		return cic;
	}

	do {
		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
		rcu_read_unlock();
		if (!cic)
			break;
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(cfqd, ioc, cic);
			rcu_read_lock();
			continue;
		}

		spin_lock_irqsave(&ioc->lock, flags);
		rcu_assign_pointer(ioc->ioc_data, cic);
		spin_unlock_irqrestore(&ioc->lock, flags);
		break;
	} while (1);

	return cic;
}

/*
 * Add cic into ioc, using cfqd as the search key. This enables us to look up
 * the process-specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (!ret) {
		cic->ioc = ioc;
		cic->key = cfqd;

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->radix_root,
						(unsigned long) cfqd, cic);
		if (!ret)
			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (!ret) {
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &cfqd->cic_list);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
		}
	}

	if (ret)
		printk(KERN_ERR "cfq: cic link failed!\n");

	return ret;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
		goto err_free;

out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

	return cic;
err_free:
	cfq_cic_free(cic);
err:
	put_io_context(ioc);
	return NULL;
}

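/*
 * Track the process' "thinktime" - the gap between one of its requests
 * completing and the next being issued - as a fixed-point EWMA that keeps
 * 7/8 of the old value on each update, with the sample count saturating
 * at 256.
 */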
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - rq->sector;

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no (or if we aren't sure); 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging.
		 */
		if (cfq_cfqq_wait_request(cfqq))
			cfq_mark_cfqq_must_dispatch(cfqq);
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime, or this new queue
		 * has some old slice time left and is of higher priority, or
		 * this new queue is RT and the current one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		blk_start_queueing(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
		cfqd->rq_in_driver_peak = cfqd->rq_in_driver;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;

	cfqd->hw_tag_samples = 0;
	cfqd->rq_in_driver_peak = 0;
}

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete");

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && !rq_noidle(rq) &&
			 RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * So just look up a possibly existing queue, or return 'may queue'
	 * if that fails.
	 */
2061	cic = cfq_cic_lookup(cfqd, tsk->io_context);
2062	if (!cic)
2063		return ELV_MQUEUE_MAY;
2064
2065	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
2066	if (cfqq) {
2067		cfq_init_prio_data(cfqq, cic->ioc);
2068		cfq_prio_boost(cfqq);
2069
2070		return __cfq_may_queue(cfqq);
2071	}
2072
2073	return ELV_MQUEUE_MAY;
2074}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);

		if (!cfqq)
			goto queue_fail;

		cic_set_cfqq(cic, cfqq, is_sync);
	}

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	cfq_log(cfqd, "set_request fail");
	return 1;
}
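
/*
 * Lifecycle note (illustrative): cfq_set_request() and cfq_put_request()
 * stay balanced per request. set_request takes a queue reference
 * (atomic_inc(&cfqq->ref)), bumps allocated[rw] and stashes cic/cfqq in
 * rq->elevator_private{,2}; put_request undoes all three via
 * put_io_context() and cfq_put_queue(), so the counts and references
 * always match up.
 */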

static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * Only expire and reinvoke the request handler if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
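
/*
 * Outcome summary of the timer checks above (illustrative):
 *
 *	no active queue			-> expire (timed_out = 1), kick dispatch
 *	must_dispatch set		-> keep the slice, kick dispatch
 *	slice already used		-> expire, kick dispatch
 *	no busy queues at all		-> just unlock; nothing else to run
 *	own request still pending	-> keep the slice, kick dispatch
 *	otherwise (others waiting)	-> expire, kick dispatch
 */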

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}
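
/*
 * Layout note (an assumption based on how the array is indexed earlier in
 * this file): async_cfqq[0][prio] holds the shared async queue for the RT
 * class at priority level prio, async_cfqq[1][prio] the one for the BE
 * class, and async_idle_cfqq the single queue for the IDLE class -- hence
 * the 2 x IOPRIO_BE_NR shape plus one extra pointer torn down here.
 */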

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd);
}

static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->last_end_request = jiffies;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->hw_tag = 1;

	return cfqd;
}
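
/*
 * Default tunables, worked out for HZ == 1000 (illustrative; the
 * constants themselves are defined in jiffies at the top of this file):
 *
 *	cfq_slice[1] (sync)		HZ / 10  -> 100 ms
 *	cfq_slice[0] (async)		HZ / 25  ->  40 ms
 *	cfq_slice_idle			HZ / 125 ->   8 ms
 *	cfq_fifo_expire[0] (async)	HZ / 4   -> 250 ms
 *	cfq_fifo_expire[1] (sync)	HZ / 8   -> 125 ms
 */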

static void cfq_slab_kill(void)
{
	/*
	 * Caller already ensured that pending RCU callbacks are completed,
	 * so we should have no busy allocations at this point.
	 */
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION
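
/*
 * For reference, SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1)
 * expands to roughly the following (__CONV == 1 converts jiffies to msecs,
 * so sysfs always reports milliseconds):
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_slice_idle;
 *		__data = jiffies_to_msecs(__data);
 *		return cfq_var_show(__data, (page));
 *	}
 */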

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
#undef STORE_FUNCTION
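
/*
 * Example usage from userspace (assuming cfq is the active elevator on
 * sda and an HZ == 1000 kernel with the defaults above; __CONV == 1
 * attributes are read and written in milliseconds, and stored values are
 * clamped to [MIN, MAX] first):
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	8
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 */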

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();

	/*
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);
	cfq_slab_kill();
}
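
/*
 * Teardown ordering sketch (an assumption based on the io-context free
 * path earlier in this file): cfq_exit() publishes ioc_gone, then waits
 * for the per-cpu ioc_count to drain before destroying the slab caches:
 *
 *	cfq_exit()			freeing side (last cfq_io_context)
 *	--------------------------	----------------------------------
 *	ioc_gone = &all_gone;
 *	smp_wmb();			decrements ioc_count to 0, sees
 *	if (elv_ioc_count_read(...))	ioc_gone set, calls complete()
 *		wait_for_completion();
 *	cfq_slab_kill();		no RCU callbacks can still touch
 *					the caches being destroyed
 */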

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");