cfq-iosched.c revision afc24d49c1e5dbeef745c1c1246f5ae6ebd97c71
1/*
2 *  CFQ, or complete fairness queueing, disk scheduler.
3 *
4 *  Based on ideas from a previously unfinished io
5 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6 *
7 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8 */
9#include <linux/module.h>
10#include <linux/slab.h>
11#include <linux/blkdev.h>
12#include <linux/elevator.h>
13#include <linux/jiffies.h>
14#include <linux/rbtree.h>
15#include <linux/ioprio.h>
16#include <linux/blktrace_api.h>
17#include "blk-cgroup.h"
18
19/*
20 * tunables
21 */
22/* max queue in one round of service */
23static const int cfq_quantum = 8;
24static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
25/* maximum backwards seek, in KiB */
26static const int cfq_back_max = 16 * 1024;
27/* penalty of a backwards seek */
28static const int cfq_back_penalty = 2;
29static const int cfq_slice_sync = HZ / 10;
30static int cfq_slice_async = HZ / 25;
31static const int cfq_slice_async_rq = 2;
32static int cfq_slice_idle = HZ / 125;
33static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
34static const int cfq_hist_divisor = 4;
35
36/*
37 * offset from end of service tree
38 */
39#define CFQ_IDLE_DELAY		(HZ / 5)
40
41/*
42 * below this threshold, we consider thinktime immediate
43 */
44#define CFQ_MIN_TT		(2)
45
46#define CFQ_SLICE_SCALE		(5)
47#define CFQ_HW_QUEUE_MIN	(5)
48#define CFQ_SERVICE_SHIFT       12
49
50#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
51#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
52#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
53#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
54
55#define RQ_CIC(rq)		\
56	((struct cfq_io_context *) (rq)->elevator_private)
57#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
58#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elevator_private3)
59
60static struct kmem_cache *cfq_pool;
61static struct kmem_cache *cfq_ioc_pool;
62
63static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
64static struct completion *ioc_gone;
65static DEFINE_SPINLOCK(ioc_gone_lock);
66
67#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
68#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
69#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
70
71#define sample_valid(samples)	((samples) > 80)
72#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
73
74/*
75 * Most of our rbtree usage is for sorting with min extraction, so
76 * if we cache the leftmost node we don't have to walk down the tree
77 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
78 * move this into the elevator for the rq sorting as well.
79 */
80struct cfq_rb_root {
81	struct rb_root rb;
82	struct rb_node *left;
83	unsigned count;
84	unsigned total_weight;
85	u64 min_vdisktime;
86	struct rb_node *active;
87};
88#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
89			.count = 0, .min_vdisktime = 0, }
90
91/*
92 * Per process-grouping structure
93 */
94struct cfq_queue {
95	/* reference count */
96	atomic_t ref;
97	/* various state flags, see below */
98	unsigned int flags;
99	/* parent cfq_data */
100	struct cfq_data *cfqd;
101	/* service_tree member */
102	struct rb_node rb_node;
103	/* service_tree key */
104	unsigned long rb_key;
105	/* prio tree member */
106	struct rb_node p_node;
107	/* prio tree root we belong to, if any */
108	struct rb_root *p_root;
109	/* sorted list of pending requests */
110	struct rb_root sort_list;
111	/* if fifo isn't expired, next request to serve */
112	struct request *next_rq;
113	/* requests queued in sort_list */
114	int queued[2];
115	/* currently allocated requests */
116	int allocated[2];
117	/* fifo list of requests in sort_list */
118	struct list_head fifo;
119
120	/* time when queue got scheduled in to dispatch first request. */
121	unsigned long dispatch_start;
122	unsigned int allocated_slice;
123	unsigned int slice_dispatch;
124	/* time when first request from queue completed and slice started. */
125	unsigned long slice_start;
126	unsigned long slice_end;
127	long slice_resid;
128
129	/* pending metadata requests */
130	int meta_pending;
131	/* number of requests that are on the dispatch list or inside driver */
132	int dispatched;
133
134	/* io prio of this group */
135	unsigned short ioprio, org_ioprio;
136	unsigned short ioprio_class, org_ioprio_class;
137
138	pid_t pid;
139
140	u32 seek_history;
141	sector_t last_request_pos;
142
143	struct cfq_rb_root *service_tree;
144	struct cfq_queue *new_cfqq;
145	struct cfq_group *cfqg;
146	struct cfq_group *orig_cfqg;
147};
148
149/*
150 * First index in the service_trees.
151 * IDLE is handled separately, so it has negative index
152 */
153enum wl_prio_t {
154	BE_WORKLOAD = 0,
155	RT_WORKLOAD = 1,
156	IDLE_WORKLOAD = 2,
157};
158
159/*
160 * Second index in the service_trees.
161 */
162enum wl_type_t {
163	ASYNC_WORKLOAD = 0,
164	SYNC_NOIDLE_WORKLOAD = 1,
165	SYNC_WORKLOAD = 2
166};
167
168/* This is per cgroup per device grouping structure */
169struct cfq_group {
170	/* group service_tree member */
171	struct rb_node rb_node;
172
173	/* group service_tree key */
174	u64 vdisktime;
175	unsigned int weight;
176	bool on_st;
177
178	/* number of cfqq currently on this group */
179	int nr_cfqq;
180
181	/* Per group busy queus average. Useful for workload slice calc. */
182	unsigned int busy_queues_avg[2];
183	/*
184	 * rr lists of queues with requests, onle rr for each priority class.
185	 * Counts are embedded in the cfq_rb_root
186	 */
187	struct cfq_rb_root service_trees[2][3];
188	struct cfq_rb_root service_tree_idle;
189
190	unsigned long saved_workload_slice;
191	enum wl_type_t saved_workload;
192	enum wl_prio_t saved_serving_prio;
193	struct blkio_group blkg;
194#ifdef CONFIG_CFQ_GROUP_IOSCHED
195	struct hlist_node cfqd_node;
196	atomic_t ref;
197#endif
198};
199
200/*
201 * Per block device queue structure
202 */
203struct cfq_data {
204	struct request_queue *queue;
205	/* Root service tree for cfq_groups */
206	struct cfq_rb_root grp_service_tree;
207	struct cfq_group root_group;
208
209	/*
210	 * The priority currently being served
211	 */
212	enum wl_prio_t serving_prio;
213	enum wl_type_t serving_type;
214	unsigned long workload_expires;
215	struct cfq_group *serving_group;
216	bool noidle_tree_requires_idle;
217
218	/*
219	 * Each priority tree is sorted by next_request position.  These
220	 * trees are used when determining if two or more queues are
221	 * interleaving requests (see cfq_close_cooperator).
222	 */
223	struct rb_root prio_trees[CFQ_PRIO_LISTS];
224
225	unsigned int busy_queues;
226
227	int rq_in_driver;
228	int rq_in_flight[2];
229
230	/*
231	 * queue-depth detection
232	 */
233	int rq_queued;
234	int hw_tag;
235	/*
236	 * hw_tag can be
237	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
238	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
239	 *  0 => no NCQ
240	 */
241	int hw_tag_est_depth;
242	unsigned int hw_tag_samples;
243
244	/*
245	 * idle window management
246	 */
247	struct timer_list idle_slice_timer;
248	struct work_struct unplug_work;
249
250	struct cfq_queue *active_queue;
251	struct cfq_io_context *active_cic;
252
253	/*
254	 * async queue for each priority case
255	 */
256	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
257	struct cfq_queue *async_idle_cfqq;
258
259	sector_t last_position;
260
261	/*
262	 * tunables, see top of file
263	 */
264	unsigned int cfq_quantum;
265	unsigned int cfq_fifo_expire[2];
266	unsigned int cfq_back_penalty;
267	unsigned int cfq_back_max;
268	unsigned int cfq_slice[2];
269	unsigned int cfq_slice_async_rq;
270	unsigned int cfq_slice_idle;
271	unsigned int cfq_latency;
272	unsigned int cfq_group_isolation;
273
274	struct list_head cic_list;
275
276	/*
277	 * Fallback dummy cfqq for extreme OOM conditions
278	 */
279	struct cfq_queue oom_cfqq;
280
281	unsigned long last_delayed_sync;
282
283	/* List of cfq groups being managed on this device*/
284	struct hlist_head cfqg_list;
285	struct rcu_head rcu;
286};
287
288static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
289
290static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
291					    enum wl_prio_t prio,
292					    enum wl_type_t type)
293{
294	if (!cfqg)
295		return NULL;
296
297	if (prio == IDLE_WORKLOAD)
298		return &cfqg->service_tree_idle;
299
300	return &cfqg->service_trees[prio][type];
301}
302
303enum cfqq_state_flags {
304	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
305	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
306	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
307	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
308	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
309	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
310	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
311	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
312	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
313	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
314	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
315	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
316	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
317};
318
319#define CFQ_CFQQ_FNS(name)						\
320static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
321{									\
322	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
323}									\
324static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
325{									\
326	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
327}									\
328static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
329{									\
330	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
331}
332
333CFQ_CFQQ_FNS(on_rr);
334CFQ_CFQQ_FNS(wait_request);
335CFQ_CFQQ_FNS(must_dispatch);
336CFQ_CFQQ_FNS(must_alloc_slice);
337CFQ_CFQQ_FNS(fifo_expire);
338CFQ_CFQQ_FNS(idle_window);
339CFQ_CFQQ_FNS(prio_changed);
340CFQ_CFQQ_FNS(slice_new);
341CFQ_CFQQ_FNS(sync);
342CFQ_CFQQ_FNS(coop);
343CFQ_CFQQ_FNS(split_coop);
344CFQ_CFQQ_FNS(deep);
345CFQ_CFQQ_FNS(wait_busy);
346#undef CFQ_CFQQ_FNS
347
348#ifdef CONFIG_CFQ_GROUP_IOSCHED
349#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
350	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
351			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
352			blkg_path(&(cfqq)->cfqg->blkg), ##args);
353
354#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
355	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
356				blkg_path(&(cfqg)->blkg), ##args);      \
357
358#else
359#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
360	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
361#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0);
362#endif
363#define cfq_log(cfqd, fmt, args...)	\
364	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
365
366/* Traverses through cfq group service trees */
367#define for_each_cfqg_st(cfqg, i, j, st) \
368	for (i = 0; i <= IDLE_WORKLOAD; i++) \
369		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
370			: &cfqg->service_tree_idle; \
371			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
372			(i == IDLE_WORKLOAD && j == 0); \
373			j++, st = i < IDLE_WORKLOAD ? \
374			&cfqg->service_trees[i][j]: NULL) \
375
376
377static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
378{
379	if (cfq_class_idle(cfqq))
380		return IDLE_WORKLOAD;
381	if (cfq_class_rt(cfqq))
382		return RT_WORKLOAD;
383	return BE_WORKLOAD;
384}
385
386
387static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
388{
389	if (!cfq_cfqq_sync(cfqq))
390		return ASYNC_WORKLOAD;
391	if (!cfq_cfqq_idle_window(cfqq))
392		return SYNC_NOIDLE_WORKLOAD;
393	return SYNC_WORKLOAD;
394}
395
396static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
397					struct cfq_data *cfqd,
398					struct cfq_group *cfqg)
399{
400	if (wl == IDLE_WORKLOAD)
401		return cfqg->service_tree_idle.count;
402
403	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
404		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
405		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
406}
407
408static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
409					struct cfq_group *cfqg)
410{
411	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
412		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
413}
414
415static void cfq_dispatch_insert(struct request_queue *, struct request *);
416static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
417				       struct io_context *, gfp_t);
418static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
419						struct io_context *);
420
421static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
422					    bool is_sync)
423{
424	return cic->cfqq[is_sync];
425}
426
427static inline void cic_set_cfqq(struct cfq_io_context *cic,
428				struct cfq_queue *cfqq, bool is_sync)
429{
430	cic->cfqq[is_sync] = cfqq;
431}
432
433/*
434 * We regard a request as SYNC, if it's either a read or has the SYNC bit
435 * set (in which case it could also be direct WRITE).
436 */
437static inline bool cfq_bio_sync(struct bio *bio)
438{
439	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
440}
441
442/*
443 * scheduler run of queue, if there are requests pending and no one in the
444 * driver that will restart queueing
445 */
446static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
447{
448	if (cfqd->busy_queues) {
449		cfq_log(cfqd, "schedule dispatch");
450		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
451	}
452}
453
454static int cfq_queue_empty(struct request_queue *q)
455{
456	struct cfq_data *cfqd = q->elevator->elevator_data;
457
458	return !cfqd->rq_queued;
459}
460
461/*
462 * Scale schedule slice based on io priority. Use the sync time slice only
463 * if a queue is marked sync and has sync io queued. A sync queue with async
464 * io only, should not get full sync slice length.
465 */
466static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
467				 unsigned short prio)
468{
469	const int base_slice = cfqd->cfq_slice[sync];
470
471	WARN_ON(prio >= IOPRIO_BE_NR);
472
473	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
474}
475
476static inline int
477cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
478{
479	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
480}
481
482static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
483{
484	u64 d = delta << CFQ_SERVICE_SHIFT;
485
486	d = d * BLKIO_WEIGHT_DEFAULT;
487	do_div(d, cfqg->weight);
488	return d;
489}
490
491static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
492{
493	s64 delta = (s64)(vdisktime - min_vdisktime);
494	if (delta > 0)
495		min_vdisktime = vdisktime;
496
497	return min_vdisktime;
498}
499
500static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
501{
502	s64 delta = (s64)(vdisktime - min_vdisktime);
503	if (delta < 0)
504		min_vdisktime = vdisktime;
505
506	return min_vdisktime;
507}
508
509static void update_min_vdisktime(struct cfq_rb_root *st)
510{
511	u64 vdisktime = st->min_vdisktime;
512	struct cfq_group *cfqg;
513
514	if (st->active) {
515		cfqg = rb_entry_cfqg(st->active);
516		vdisktime = cfqg->vdisktime;
517	}
518
519	if (st->left) {
520		cfqg = rb_entry_cfqg(st->left);
521		vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
522	}
523
524	st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
525}
526
527/*
528 * get averaged number of queues of RT/BE priority.
529 * average is updated, with a formula that gives more weight to higher numbers,
530 * to quickly follows sudden increases and decrease slowly
531 */
532
533static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
534					struct cfq_group *cfqg, bool rt)
535{
536	unsigned min_q, max_q;
537	unsigned mult  = cfq_hist_divisor - 1;
538	unsigned round = cfq_hist_divisor / 2;
539	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
540
541	min_q = min(cfqg->busy_queues_avg[rt], busy);
542	max_q = max(cfqg->busy_queues_avg[rt], busy);
543	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
544		cfq_hist_divisor;
545	return cfqg->busy_queues_avg[rt];
546}
547
548static inline unsigned
549cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
550{
551	struct cfq_rb_root *st = &cfqd->grp_service_tree;
552
553	return cfq_target_latency * cfqg->weight / st->total_weight;
554}
555
556static inline void
557cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
558{
559	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
560	if (cfqd->cfq_latency) {
561		/*
562		 * interested queues (we consider only the ones with the same
563		 * priority class in the cfq group)
564		 */
565		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
566						cfq_class_rt(cfqq));
567		unsigned sync_slice = cfqd->cfq_slice[1];
568		unsigned expect_latency = sync_slice * iq;
569		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
570
571		if (expect_latency > group_slice) {
572			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
573			/* scale low_slice according to IO priority
574			 * and sync vs async */
575			unsigned low_slice =
576				min(slice, base_low_slice * slice / sync_slice);
577			/* the adapted slice value is scaled to fit all iqs
578			 * into the target latency */
579			slice = max(slice * group_slice / expect_latency,
580				    low_slice);
581		}
582	}
583	cfqq->slice_start = jiffies;
584	cfqq->slice_end = jiffies + slice;
585	cfqq->allocated_slice = slice;
586	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
587}
588
589/*
590 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
591 * isn't valid until the first request from the dispatch is activated
592 * and the slice time set.
593 */
594static inline bool cfq_slice_used(struct cfq_queue *cfqq)
595{
596	if (cfq_cfqq_slice_new(cfqq))
597		return 0;
598	if (time_before(jiffies, cfqq->slice_end))
599		return 0;
600
601	return 1;
602}
603
604/*
605 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
606 * We choose the request that is closest to the head right now. Distance
607 * behind the head is penalized and only allowed to a certain extent.
608 */
609static struct request *
610cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
611{
612	sector_t s1, s2, d1 = 0, d2 = 0;
613	unsigned long back_max;
614#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
615#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
616	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
617
618	if (rq1 == NULL || rq1 == rq2)
619		return rq2;
620	if (rq2 == NULL)
621		return rq1;
622
623	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
624		return rq1;
625	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
626		return rq2;
627	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
628		return rq1;
629	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
630		return rq2;
631
632	s1 = blk_rq_pos(rq1);
633	s2 = blk_rq_pos(rq2);
634
635	/*
636	 * by definition, 1KiB is 2 sectors
637	 */
638	back_max = cfqd->cfq_back_max * 2;
639
640	/*
641	 * Strict one way elevator _except_ in the case where we allow
642	 * short backward seeks which are biased as twice the cost of a
643	 * similar forward seek.
644	 */
645	if (s1 >= last)
646		d1 = s1 - last;
647	else if (s1 + back_max >= last)
648		d1 = (last - s1) * cfqd->cfq_back_penalty;
649	else
650		wrap |= CFQ_RQ1_WRAP;
651
652	if (s2 >= last)
653		d2 = s2 - last;
654	else if (s2 + back_max >= last)
655		d2 = (last - s2) * cfqd->cfq_back_penalty;
656	else
657		wrap |= CFQ_RQ2_WRAP;
658
659	/* Found required data */
660
661	/*
662	 * By doing switch() on the bit mask "wrap" we avoid having to
663	 * check two variables for all permutations: --> faster!
664	 */
665	switch (wrap) {
666	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
667		if (d1 < d2)
668			return rq1;
669		else if (d2 < d1)
670			return rq2;
671		else {
672			if (s1 >= s2)
673				return rq1;
674			else
675				return rq2;
676		}
677
678	case CFQ_RQ2_WRAP:
679		return rq1;
680	case CFQ_RQ1_WRAP:
681		return rq2;
682	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
683	default:
684		/*
685		 * Since both rqs are wrapped,
686		 * start with the one that's further behind head
687		 * (--> only *one* back seek required),
688		 * since back seek takes more time than forward.
689		 */
690		if (s1 <= s2)
691			return rq1;
692		else
693			return rq2;
694	}
695}
696
697/*
698 * The below is leftmost cache rbtree addon
699 */
700static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
701{
702	/* Service tree is empty */
703	if (!root->count)
704		return NULL;
705
706	if (!root->left)
707		root->left = rb_first(&root->rb);
708
709	if (root->left)
710		return rb_entry(root->left, struct cfq_queue, rb_node);
711
712	return NULL;
713}
714
715static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
716{
717	if (!root->left)
718		root->left = rb_first(&root->rb);
719
720	if (root->left)
721		return rb_entry_cfqg(root->left);
722
723	return NULL;
724}
725
726static void rb_erase_init(struct rb_node *n, struct rb_root *root)
727{
728	rb_erase(n, root);
729	RB_CLEAR_NODE(n);
730}
731
732static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
733{
734	if (root->left == n)
735		root->left = NULL;
736	rb_erase_init(n, &root->rb);
737	--root->count;
738}
739
740/*
741 * would be nice to take fifo expire time into account as well
742 */
743static struct request *
744cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
745		  struct request *last)
746{
747	struct rb_node *rbnext = rb_next(&last->rb_node);
748	struct rb_node *rbprev = rb_prev(&last->rb_node);
749	struct request *next = NULL, *prev = NULL;
750
751	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
752
753	if (rbprev)
754		prev = rb_entry_rq(rbprev);
755
756	if (rbnext)
757		next = rb_entry_rq(rbnext);
758	else {
759		rbnext = rb_first(&cfqq->sort_list);
760		if (rbnext && rbnext != &last->rb_node)
761			next = rb_entry_rq(rbnext);
762	}
763
764	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
765}
766
767static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
768				      struct cfq_queue *cfqq)
769{
770	/*
771	 * just an approximation, should be ok.
772	 */
773	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
774		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
775}
776
777static inline s64
778cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
779{
780	return cfqg->vdisktime - st->min_vdisktime;
781}
782
783static void
784__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
785{
786	struct rb_node **node = &st->rb.rb_node;
787	struct rb_node *parent = NULL;
788	struct cfq_group *__cfqg;
789	s64 key = cfqg_key(st, cfqg);
790	int left = 1;
791
792	while (*node != NULL) {
793		parent = *node;
794		__cfqg = rb_entry_cfqg(parent);
795
796		if (key < cfqg_key(st, __cfqg))
797			node = &parent->rb_left;
798		else {
799			node = &parent->rb_right;
800			left = 0;
801		}
802	}
803
804	if (left)
805		st->left = &cfqg->rb_node;
806
807	rb_link_node(&cfqg->rb_node, parent, node);
808	rb_insert_color(&cfqg->rb_node, &st->rb);
809}
810
811static void
812cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
813{
814	struct cfq_rb_root *st = &cfqd->grp_service_tree;
815	struct cfq_group *__cfqg;
816	struct rb_node *n;
817
818	cfqg->nr_cfqq++;
819	if (cfqg->on_st)
820		return;
821
822	/*
823	 * Currently put the group at the end. Later implement something
824	 * so that groups get lesser vtime based on their weights, so that
825	 * if group does not loose all if it was not continously backlogged.
826	 */
827	n = rb_last(&st->rb);
828	if (n) {
829		__cfqg = rb_entry_cfqg(n);
830		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
831	} else
832		cfqg->vdisktime = st->min_vdisktime;
833
834	__cfq_group_service_tree_add(st, cfqg);
835	cfqg->on_st = true;
836	st->total_weight += cfqg->weight;
837}
838
839static void
840cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
841{
842	struct cfq_rb_root *st = &cfqd->grp_service_tree;
843
844	if (st->active == &cfqg->rb_node)
845		st->active = NULL;
846
847	BUG_ON(cfqg->nr_cfqq < 1);
848	cfqg->nr_cfqq--;
849
850	/* If there are other cfq queues under this group, don't delete it */
851	if (cfqg->nr_cfqq)
852		return;
853
854	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
855	cfqg->on_st = false;
856	st->total_weight -= cfqg->weight;
857	if (!RB_EMPTY_NODE(&cfqg->rb_node))
858		cfq_rb_erase(&cfqg->rb_node, st);
859	cfqg->saved_workload_slice = 0;
860	blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
861}
862
863static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
864{
865	unsigned int slice_used;
866
867	/*
868	 * Queue got expired before even a single request completed or
869	 * got expired immediately after first request completion.
870	 */
871	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
872		/*
873		 * Also charge the seek time incurred to the group, otherwise
874		 * if there are mutiple queues in the group, each can dispatch
875		 * a single request on seeky media and cause lots of seek time
876		 * and group will never know it.
877		 */
878		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
879					1);
880	} else {
881		slice_used = jiffies - cfqq->slice_start;
882		if (slice_used > cfqq->allocated_slice)
883			slice_used = cfqq->allocated_slice;
884	}
885
886	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u", slice_used);
887	return slice_used;
888}
889
890static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
891				struct cfq_queue *cfqq)
892{
893	struct cfq_rb_root *st = &cfqd->grp_service_tree;
894	unsigned int used_sl, charge_sl;
895	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
896			- cfqg->service_tree_idle.count;
897
898	BUG_ON(nr_sync < 0);
899	used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
900
901	if (!cfq_cfqq_sync(cfqq) && !nr_sync)
902		charge_sl = cfqq->allocated_slice;
903
904	/* Can't update vdisktime while group is on service tree */
905	cfq_rb_erase(&cfqg->rb_node, st);
906	cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
907	__cfq_group_service_tree_add(st, cfqg);
908
909	/* This group is being expired. Save the context */
910	if (time_after(cfqd->workload_expires, jiffies)) {
911		cfqg->saved_workload_slice = cfqd->workload_expires
912						- jiffies;
913		cfqg->saved_workload = cfqd->serving_type;
914		cfqg->saved_serving_prio = cfqd->serving_prio;
915	} else
916		cfqg->saved_workload_slice = 0;
917
918	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
919					st->min_vdisktime);
920	blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
921	blkiocg_set_start_empty_time(&cfqg->blkg);
922}
923
924#ifdef CONFIG_CFQ_GROUP_IOSCHED
925static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
926{
927	if (blkg)
928		return container_of(blkg, struct cfq_group, blkg);
929	return NULL;
930}
931
932void
933cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
934{
935	cfqg_of_blkg(blkg)->weight = weight;
936}
937
938static struct cfq_group *
939cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
940{
941	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
942	struct cfq_group *cfqg = NULL;
943	void *key = cfqd;
944	int i, j;
945	struct cfq_rb_root *st;
946	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
947	unsigned int major, minor;
948
949	cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
950	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
951		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
952		cfqg->blkg.dev = MKDEV(major, minor);
953		goto done;
954	}
955	if (cfqg || !create)
956		goto done;
957
958	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
959	if (!cfqg)
960		goto done;
961
962	for_each_cfqg_st(cfqg, i, j, st)
963		*st = CFQ_RB_ROOT;
964	RB_CLEAR_NODE(&cfqg->rb_node);
965
966	/*
967	 * Take the initial reference that will be released on destroy
968	 * This can be thought of a joint reference by cgroup and
969	 * elevator which will be dropped by either elevator exit
970	 * or cgroup deletion path depending on who is exiting first.
971	 */
972	atomic_set(&cfqg->ref, 1);
973
974	/* Add group onto cgroup list */
975	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
976	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
977					MKDEV(major, minor));
978	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
979
980	/* Add group on cfqd list */
981	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
982
983done:
984	return cfqg;
985}
986
987/*
988 * Search for the cfq group current task belongs to. If create = 1, then also
989 * create the cfq group if it does not exist. request_queue lock must be held.
990 */
991static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
992{
993	struct cgroup *cgroup;
994	struct cfq_group *cfqg = NULL;
995
996	rcu_read_lock();
997	cgroup = task_cgroup(current, blkio_subsys_id);
998	cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
999	if (!cfqg && create)
1000		cfqg = &cfqd->root_group;
1001	rcu_read_unlock();
1002	return cfqg;
1003}
1004
1005static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1006{
1007	atomic_inc(&cfqg->ref);
1008	return cfqg;
1009}
1010
1011static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1012{
1013	/* Currently, all async queues are mapped to root group */
1014	if (!cfq_cfqq_sync(cfqq))
1015		cfqg = &cfqq->cfqd->root_group;
1016
1017	cfqq->cfqg = cfqg;
1018	/* cfqq reference on cfqg */
1019	atomic_inc(&cfqq->cfqg->ref);
1020}
1021
1022static void cfq_put_cfqg(struct cfq_group *cfqg)
1023{
1024	struct cfq_rb_root *st;
1025	int i, j;
1026
1027	BUG_ON(atomic_read(&cfqg->ref) <= 0);
1028	if (!atomic_dec_and_test(&cfqg->ref))
1029		return;
1030	for_each_cfqg_st(cfqg, i, j, st)
1031		BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
1032	kfree(cfqg);
1033}
1034
1035static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1036{
1037	/* Something wrong if we are trying to remove same group twice */
1038	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1039
1040	hlist_del_init(&cfqg->cfqd_node);
1041
1042	/*
1043	 * Put the reference taken at the time of creation so that when all
1044	 * queues are gone, group can be destroyed.
1045	 */
1046	cfq_put_cfqg(cfqg);
1047}
1048
1049static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1050{
1051	struct hlist_node *pos, *n;
1052	struct cfq_group *cfqg;
1053
1054	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1055		/*
1056		 * If cgroup removal path got to blk_group first and removed
1057		 * it from cgroup list, then it will take care of destroying
1058		 * cfqg also.
1059		 */
1060		if (!blkiocg_del_blkio_group(&cfqg->blkg))
1061			cfq_destroy_cfqg(cfqd, cfqg);
1062	}
1063}
1064
1065/*
1066 * Blk cgroup controller notification saying that blkio_group object is being
1067 * delinked as associated cgroup object is going away. That also means that
1068 * no new IO will come in this group. So get rid of this group as soon as
1069 * any pending IO in the group is finished.
1070 *
1071 * This function is called under rcu_read_lock(). key is the rcu protected
1072 * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
1073 * read lock.
1074 *
1075 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1076 * it should not be NULL as even if elevator was exiting, cgroup deltion
1077 * path got to it first.
1078 */
1079void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1080{
1081	unsigned long  flags;
1082	struct cfq_data *cfqd = key;
1083
1084	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1085	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1086	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1087}
1088
1089#else /* GROUP_IOSCHED */
1090static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1091{
1092	return &cfqd->root_group;
1093}
1094
1095static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1096{
1097	return NULL;
1098}
1099
1100static inline void
1101cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1102	cfqq->cfqg = cfqg;
1103}
1104
1105static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1106static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1107
1108#endif /* GROUP_IOSCHED */
1109
1110/*
1111 * The cfqd->service_trees holds all pending cfq_queue's that have
1112 * requests waiting to be processed. It is sorted in the order that
1113 * we will service the queues.
1114 */
1115static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1116				 bool add_front)
1117{
1118	struct rb_node **p, *parent;
1119	struct cfq_queue *__cfqq;
1120	unsigned long rb_key;
1121	struct cfq_rb_root *service_tree;
1122	int left;
1123	int new_cfqq = 1;
1124	int group_changed = 0;
1125
1126#ifdef CONFIG_CFQ_GROUP_IOSCHED
1127	if (!cfqd->cfq_group_isolation
1128	    && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
1129	    && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
1130		/* Move this cfq to root group */
1131		cfq_log_cfqq(cfqd, cfqq, "moving to root group");
1132		if (!RB_EMPTY_NODE(&cfqq->rb_node))
1133			cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1134		cfqq->orig_cfqg = cfqq->cfqg;
1135		cfqq->cfqg = &cfqd->root_group;
1136		atomic_inc(&cfqd->root_group.ref);
1137		group_changed = 1;
1138	} else if (!cfqd->cfq_group_isolation
1139		   && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
1140		/* cfqq is sequential now needs to go to its original group */
1141		BUG_ON(cfqq->cfqg != &cfqd->root_group);
1142		if (!RB_EMPTY_NODE(&cfqq->rb_node))
1143			cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1144		cfq_put_cfqg(cfqq->cfqg);
1145		cfqq->cfqg = cfqq->orig_cfqg;
1146		cfqq->orig_cfqg = NULL;
1147		group_changed = 1;
1148		cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
1149	}
1150#endif
1151
1152	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1153						cfqq_type(cfqq));
1154	if (cfq_class_idle(cfqq)) {
1155		rb_key = CFQ_IDLE_DELAY;
1156		parent = rb_last(&service_tree->rb);
1157		if (parent && parent != &cfqq->rb_node) {
1158			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1159			rb_key += __cfqq->rb_key;
1160		} else
1161			rb_key += jiffies;
1162	} else if (!add_front) {
1163		/*
1164		 * Get our rb key offset. Subtract any residual slice
1165		 * value carried from last service. A negative resid
1166		 * count indicates slice overrun, and this should position
1167		 * the next service time further away in the tree.
1168		 */
1169		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1170		rb_key -= cfqq->slice_resid;
1171		cfqq->slice_resid = 0;
1172	} else {
1173		rb_key = -HZ;
1174		__cfqq = cfq_rb_first(service_tree);
1175		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1176	}
1177
1178	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1179		new_cfqq = 0;
1180		/*
1181		 * same position, nothing more to do
1182		 */
1183		if (rb_key == cfqq->rb_key &&
1184		    cfqq->service_tree == service_tree)
1185			return;
1186
1187		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1188		cfqq->service_tree = NULL;
1189	}
1190
1191	left = 1;
1192	parent = NULL;
1193	cfqq->service_tree = service_tree;
1194	p = &service_tree->rb.rb_node;
1195	while (*p) {
1196		struct rb_node **n;
1197
1198		parent = *p;
1199		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1200
1201		/*
1202		 * sort by key, that represents service time.
1203		 */
1204		if (time_before(rb_key, __cfqq->rb_key))
1205			n = &(*p)->rb_left;
1206		else {
1207			n = &(*p)->rb_right;
1208			left = 0;
1209		}
1210
1211		p = n;
1212	}
1213
1214	if (left)
1215		service_tree->left = &cfqq->rb_node;
1216
1217	cfqq->rb_key = rb_key;
1218	rb_link_node(&cfqq->rb_node, parent, p);
1219	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1220	service_tree->count++;
1221	if ((add_front || !new_cfqq) && !group_changed)
1222		return;
1223	cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1224}
1225
1226static struct cfq_queue *
1227cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1228		     sector_t sector, struct rb_node **ret_parent,
1229		     struct rb_node ***rb_link)
1230{
1231	struct rb_node **p, *parent;
1232	struct cfq_queue *cfqq = NULL;
1233
1234	parent = NULL;
1235	p = &root->rb_node;
1236	while (*p) {
1237		struct rb_node **n;
1238
1239		parent = *p;
1240		cfqq = rb_entry(parent, struct cfq_queue, p_node);
1241
1242		/*
1243		 * Sort strictly based on sector.  Smallest to the left,
1244		 * largest to the right.
1245		 */
1246		if (sector > blk_rq_pos(cfqq->next_rq))
1247			n = &(*p)->rb_right;
1248		else if (sector < blk_rq_pos(cfqq->next_rq))
1249			n = &(*p)->rb_left;
1250		else
1251			break;
1252		p = n;
1253		cfqq = NULL;
1254	}
1255
1256	*ret_parent = parent;
1257	if (rb_link)
1258		*rb_link = p;
1259	return cfqq;
1260}
1261
1262static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1263{
1264	struct rb_node **p, *parent;
1265	struct cfq_queue *__cfqq;
1266
1267	if (cfqq->p_root) {
1268		rb_erase(&cfqq->p_node, cfqq->p_root);
1269		cfqq->p_root = NULL;
1270	}
1271
1272	if (cfq_class_idle(cfqq))
1273		return;
1274	if (!cfqq->next_rq)
1275		return;
1276
1277	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1278	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1279				      blk_rq_pos(cfqq->next_rq), &parent, &p);
1280	if (!__cfqq) {
1281		rb_link_node(&cfqq->p_node, parent, p);
1282		rb_insert_color(&cfqq->p_node, cfqq->p_root);
1283	} else
1284		cfqq->p_root = NULL;
1285}
1286
1287/*
1288 * Update cfqq's position in the service tree.
1289 */
1290static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1291{
1292	/*
1293	 * Resorting requires the cfqq to be on the RR list already.
1294	 */
1295	if (cfq_cfqq_on_rr(cfqq)) {
1296		cfq_service_tree_add(cfqd, cfqq, 0);
1297		cfq_prio_tree_add(cfqd, cfqq);
1298	}
1299}
1300
1301/*
1302 * add to busy list of queues for service, trying to be fair in ordering
1303 * the pending list according to last request service
1304 */
1305static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1306{
1307	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1308	BUG_ON(cfq_cfqq_on_rr(cfqq));
1309	cfq_mark_cfqq_on_rr(cfqq);
1310	cfqd->busy_queues++;
1311
1312	cfq_resort_rr_list(cfqd, cfqq);
1313}
1314
1315/*
1316 * Called when the cfqq no longer has requests pending, remove it from
1317 * the service tree.
1318 */
1319static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1320{
1321	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1322	BUG_ON(!cfq_cfqq_on_rr(cfqq));
1323	cfq_clear_cfqq_on_rr(cfqq);
1324
1325	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1326		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1327		cfqq->service_tree = NULL;
1328	}
1329	if (cfqq->p_root) {
1330		rb_erase(&cfqq->p_node, cfqq->p_root);
1331		cfqq->p_root = NULL;
1332	}
1333
1334	cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1335	BUG_ON(!cfqd->busy_queues);
1336	cfqd->busy_queues--;
1337}
1338
1339/*
1340 * rb tree support functions
1341 */
1342static void cfq_del_rq_rb(struct request *rq)
1343{
1344	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1345	const int sync = rq_is_sync(rq);
1346
1347	BUG_ON(!cfqq->queued[sync]);
1348	cfqq->queued[sync]--;
1349
1350	elv_rb_del(&cfqq->sort_list, rq);
1351
1352	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1353		/*
1354		 * Queue will be deleted from service tree when we actually
1355		 * expire it later. Right now just remove it from prio tree
1356		 * as it is empty.
1357		 */
1358		if (cfqq->p_root) {
1359			rb_erase(&cfqq->p_node, cfqq->p_root);
1360			cfqq->p_root = NULL;
1361		}
1362	}
1363}
1364
1365static void cfq_add_rq_rb(struct request *rq)
1366{
1367	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1368	struct cfq_data *cfqd = cfqq->cfqd;
1369	struct request *__alias, *prev;
1370
1371	cfqq->queued[rq_is_sync(rq)]++;
1372
1373	/*
1374	 * looks a little odd, but the first insert might return an alias.
1375	 * if that happens, put the alias on the dispatch list
1376	 */
1377	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
1378		cfq_dispatch_insert(cfqd->queue, __alias);
1379
1380	if (!cfq_cfqq_on_rr(cfqq))
1381		cfq_add_cfqq_rr(cfqd, cfqq);
1382
1383	/*
1384	 * check if this request is a better next-serve candidate
1385	 */
1386	prev = cfqq->next_rq;
1387	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1388
1389	/*
1390	 * adjust priority tree position, if ->next_rq changes
1391	 */
1392	if (prev != cfqq->next_rq)
1393		cfq_prio_tree_add(cfqd, cfqq);
1394
1395	BUG_ON(!cfqq->next_rq);
1396}
1397
1398static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1399{
1400	elv_rb_del(&cfqq->sort_list, rq);
1401	cfqq->queued[rq_is_sync(rq)]--;
1402	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
1403						rq_is_sync(rq));
1404	cfq_add_rq_rb(rq);
1405	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1406			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1407			rq_is_sync(rq));
1408}
1409
1410static struct request *
1411cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1412{
1413	struct task_struct *tsk = current;
1414	struct cfq_io_context *cic;
1415	struct cfq_queue *cfqq;
1416
1417	cic = cfq_cic_lookup(cfqd, tsk->io_context);
1418	if (!cic)
1419		return NULL;
1420
1421	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1422	if (cfqq) {
1423		sector_t sector = bio->bi_sector + bio_sectors(bio);
1424
1425		return elv_rb_find(&cfqq->sort_list, sector);
1426	}
1427
1428	return NULL;
1429}
1430
1431static void cfq_activate_request(struct request_queue *q, struct request *rq)
1432{
1433	struct cfq_data *cfqd = q->elevator->elevator_data;
1434
1435	cfqd->rq_in_driver++;
1436	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1437						cfqd->rq_in_driver);
1438
1439	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1440}
1441
1442static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1443{
1444	struct cfq_data *cfqd = q->elevator->elevator_data;
1445
1446	WARN_ON(!cfqd->rq_in_driver);
1447	cfqd->rq_in_driver--;
1448	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1449						cfqd->rq_in_driver);
1450}
1451
1452static void cfq_remove_request(struct request *rq)
1453{
1454	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1455
1456	if (cfqq->next_rq == rq)
1457		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1458
1459	list_del_init(&rq->queuelist);
1460	cfq_del_rq_rb(rq);
1461
1462	cfqq->cfqd->rq_queued--;
1463	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
1464						rq_is_sync(rq));
1465	if (rq_is_meta(rq)) {
1466		WARN_ON(!cfqq->meta_pending);
1467		cfqq->meta_pending--;
1468	}
1469}
1470
1471static int cfq_merge(struct request_queue *q, struct request **req,
1472		     struct bio *bio)
1473{
1474	struct cfq_data *cfqd = q->elevator->elevator_data;
1475	struct request *__rq;
1476
1477	__rq = cfq_find_rq_fmerge(cfqd, bio);
1478	if (__rq && elv_rq_merge_ok(__rq, bio)) {
1479		*req = __rq;
1480		return ELEVATOR_FRONT_MERGE;
1481	}
1482
1483	return ELEVATOR_NO_MERGE;
1484}
1485
1486static void cfq_merged_request(struct request_queue *q, struct request *req,
1487			       int type)
1488{
1489	if (type == ELEVATOR_FRONT_MERGE) {
1490		struct cfq_queue *cfqq = RQ_CFQQ(req);
1491
1492		cfq_reposition_rq_rb(cfqq, req);
1493	}
1494}
1495
1496static void cfq_bio_merged(struct request_queue *q, struct request *req,
1497				struct bio *bio)
1498{
1499	blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
1500					cfq_bio_sync(bio));
1501}
1502
1503static void
1504cfq_merged_requests(struct request_queue *q, struct request *rq,
1505		    struct request *next)
1506{
1507	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1508	/*
1509	 * reposition in fifo if next is older than rq
1510	 */
1511	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1512	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1513		list_move(&rq->queuelist, &next->queuelist);
1514		rq_set_fifo_time(rq, rq_fifo_time(next));
1515	}
1516
1517	if (cfqq->next_rq == next)
1518		cfqq->next_rq = rq;
1519	cfq_remove_request(next);
1520	blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
1521					rq_is_sync(next));
1522}
1523
1524static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1525			   struct bio *bio)
1526{
1527	struct cfq_data *cfqd = q->elevator->elevator_data;
1528	struct cfq_io_context *cic;
1529	struct cfq_queue *cfqq;
1530
1531	/*
1532	 * Disallow merge of a sync bio into an async request.
1533	 */
1534	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1535		return false;
1536
1537	/*
1538	 * Lookup the cfqq that this bio will be queued with. Allow
1539	 * merge only if rq is queued there.
1540	 */
1541	cic = cfq_cic_lookup(cfqd, current->io_context);
1542	if (!cic)
1543		return false;
1544
1545	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1546	return cfqq == RQ_CFQQ(rq);
1547}
1548
1549static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1550{
1551	del_timer(&cfqd->idle_slice_timer);
1552	blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1553}
1554
1555static void __cfq_set_active_queue(struct cfq_data *cfqd,
1556				   struct cfq_queue *cfqq)
1557{
1558	if (cfqq) {
1559		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1560				cfqd->serving_prio, cfqd->serving_type);
1561		blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1562		cfqq->slice_start = 0;
1563		cfqq->dispatch_start = jiffies;
1564		cfqq->allocated_slice = 0;
1565		cfqq->slice_end = 0;
1566		cfqq->slice_dispatch = 0;
1567
1568		cfq_clear_cfqq_wait_request(cfqq);
1569		cfq_clear_cfqq_must_dispatch(cfqq);
1570		cfq_clear_cfqq_must_alloc_slice(cfqq);
1571		cfq_clear_cfqq_fifo_expire(cfqq);
1572		cfq_mark_cfqq_slice_new(cfqq);
1573
1574		cfq_del_timer(cfqd, cfqq);
1575	}
1576
1577	cfqd->active_queue = cfqq;
1578}
1579
1580/*
1581 * current cfqq expired its slice (or was too idle), select new one
1582 */
1583static void
1584__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1585		    bool timed_out)
1586{
1587	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1588
1589	if (cfq_cfqq_wait_request(cfqq))
1590		cfq_del_timer(cfqd, cfqq);
1591
1592	cfq_clear_cfqq_wait_request(cfqq);
1593	cfq_clear_cfqq_wait_busy(cfqq);
1594
1595	/*
1596	 * If this cfqq is shared between multiple processes, check to
1597	 * make sure that those processes are still issuing I/Os within
1598	 * the mean seek distance.  If not, it may be time to break the
1599	 * queues apart again.
1600	 */
1601	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1602		cfq_mark_cfqq_split_coop(cfqq);
1603
1604	/*
1605	 * store what was left of this slice, if the queue idled/timed out
1606	 */
1607	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
1608		cfqq->slice_resid = cfqq->slice_end - jiffies;
1609		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1610	}
1611
1612	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1613
1614	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1615		cfq_del_cfqq_rr(cfqd, cfqq);
1616
1617	cfq_resort_rr_list(cfqd, cfqq);
1618
1619	if (cfqq == cfqd->active_queue)
1620		cfqd->active_queue = NULL;
1621
1622	if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
1623		cfqd->grp_service_tree.active = NULL;
1624
1625	if (cfqd->active_cic) {
1626		put_io_context(cfqd->active_cic->ioc);
1627		cfqd->active_cic = NULL;
1628	}
1629}
1630
1631static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1632{
1633	struct cfq_queue *cfqq = cfqd->active_queue;
1634
1635	if (cfqq)
1636		__cfq_slice_expired(cfqd, cfqq, timed_out);
1637}
1638
1639/*
1640 * Get next queue for service. Unless we have a queue preemption,
1641 * we'll simply select the first cfqq in the service tree.
1642 */
1643static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1644{
1645	struct cfq_rb_root *service_tree =
1646		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1647					cfqd->serving_type);
1648
1649	if (!cfqd->rq_queued)
1650		return NULL;
1651
1652	/* There is nothing to dispatch */
1653	if (!service_tree)
1654		return NULL;
1655	if (RB_EMPTY_ROOT(&service_tree->rb))
1656		return NULL;
1657	return cfq_rb_first(service_tree);
1658}
1659
1660static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1661{
1662	struct cfq_group *cfqg;
1663	struct cfq_queue *cfqq;
1664	int i, j;
1665	struct cfq_rb_root *st;
1666
1667	if (!cfqd->rq_queued)
1668		return NULL;
1669
1670	cfqg = cfq_get_next_cfqg(cfqd);
1671	if (!cfqg)
1672		return NULL;
1673
1674	for_each_cfqg_st(cfqg, i, j, st)
1675		if ((cfqq = cfq_rb_first(st)) != NULL)
1676			return cfqq;
1677	return NULL;
1678}
1679
1680/*
1681 * Get and set a new active queue for service.
1682 */
1683static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1684					      struct cfq_queue *cfqq)
1685{
1686	if (!cfqq)
1687		cfqq = cfq_get_next_queue(cfqd);
1688
1689	__cfq_set_active_queue(cfqd, cfqq);
1690	return cfqq;
1691}
1692
1693static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1694					  struct request *rq)
1695{
1696	if (blk_rq_pos(rq) >= cfqd->last_position)
1697		return blk_rq_pos(rq) - cfqd->last_position;
1698	else
1699		return cfqd->last_position - blk_rq_pos(rq);
1700}
1701
1702static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1703			       struct request *rq)
1704{
1705	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1706}
1707
1708static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1709				    struct cfq_queue *cur_cfqq)
1710{
1711	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1712	struct rb_node *parent, *node;
1713	struct cfq_queue *__cfqq;
1714	sector_t sector = cfqd->last_position;
1715
1716	if (RB_EMPTY_ROOT(root))
1717		return NULL;
1718
1719	/*
1720	 * First, if we find a request starting at the end of the last
1721	 * request, choose it.
1722	 */
1723	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1724	if (__cfqq)
1725		return __cfqq;
1726
1727	/*
1728	 * If the exact sector wasn't found, the parent of the NULL leaf
1729	 * will contain the closest sector.
1730	 */
1731	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
1732	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1733		return __cfqq;
1734
1735	if (blk_rq_pos(__cfqq->next_rq) < sector)
1736		node = rb_next(&__cfqq->p_node);
1737	else
1738		node = rb_prev(&__cfqq->p_node);
1739	if (!node)
1740		return NULL;
1741
1742	__cfqq = rb_entry(node, struct cfq_queue, p_node);
1743	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1744		return __cfqq;
1745
1746	return NULL;
1747}
1748
1749/*
1750 * cfqd - obvious
1751 * cur_cfqq - passed in so that we don't decide that the current queue is
1752 * 	      closely cooperating with itself.
1753 *
1754 * So, basically we're assuming that that cur_cfqq has dispatched at least
1755 * one request, and that cfqd->last_position reflects a position on the disk
1756 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
1757 * assumption.
1758 */
1759static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1760					      struct cfq_queue *cur_cfqq)
1761{
1762	struct cfq_queue *cfqq;
1763
1764	if (cfq_class_idle(cur_cfqq))
1765		return NULL;
1766	if (!cfq_cfqq_sync(cur_cfqq))
1767		return NULL;
1768	if (CFQQ_SEEKY(cur_cfqq))
1769		return NULL;
1770
1771	/*
1772	 * Don't search priority tree if it's the only queue in the group.
1773	 */
1774	if (cur_cfqq->cfqg->nr_cfqq == 1)
1775		return NULL;
1776
1777	/*
1778	 * We should notice if some of the queues are cooperating, eg
1779	 * working closely on the same area of the disk. In that case,
1780	 * we can group them together and don't waste time idling.
1781	 */
1782	cfqq = cfqq_close(cfqd, cur_cfqq);
1783	if (!cfqq)
1784		return NULL;
1785
1786	/* If new queue belongs to different cfq_group, don't choose it */
1787	if (cur_cfqq->cfqg != cfqq->cfqg)
1788		return NULL;
1789
1790	/*
1791	 * It only makes sense to merge sync queues.
1792	 */
1793	if (!cfq_cfqq_sync(cfqq))
1794		return NULL;
1795	if (CFQQ_SEEKY(cfqq))
1796		return NULL;
1797
1798	/*
1799	 * Do not merge queues of different priority classes
1800	 */
1801	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1802		return NULL;
1803
1804	return cfqq;
1805}
1806
1807/*
1808 * Determine whether we should enforce idle window for this queue.
1809 */
1810
1811static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1812{
1813	enum wl_prio_t prio = cfqq_prio(cfqq);
1814	struct cfq_rb_root *service_tree = cfqq->service_tree;
1815
1816	BUG_ON(!service_tree);
1817	BUG_ON(!service_tree->count);
1818
1819	/* We never do for idle class queues. */
1820	if (prio == IDLE_WORKLOAD)
1821		return false;
1822
1823	/* We do for queues that were marked with idle window flag. */
1824	if (cfq_cfqq_idle_window(cfqq) &&
1825	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1826		return true;
1827
1828	/*
1829	 * Otherwise, we do only if they are the last ones
1830	 * in their service tree.
1831	 */
1832	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
1833		return 1;
1834	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1835			service_tree->count);
1836	return 0;
1837}
1838
1839static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1840{
1841	struct cfq_queue *cfqq = cfqd->active_queue;
1842	struct cfq_io_context *cic;
1843	unsigned long sl;
1844
1845	/*
1846	 * SSD device without seek penalty, disable idling. But only do so
1847	 * for devices that support queuing, otherwise we still have a problem
1848	 * with sync vs async workloads.
1849	 */
1850	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1851		return;
1852
1853	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1854	WARN_ON(cfq_cfqq_slice_new(cfqq));
1855
1856	/*
1857	 * idle is disabled, either manually or by past process history
1858	 */
1859	if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
1860		return;
1861
1862	/*
1863	 * still active requests from this queue, don't idle
1864	 */
1865	if (cfqq->dispatched)
1866		return;
1867
1868	/*
1869	 * task has exited, don't wait
1870	 */
1871	cic = cfqd->active_cic;
1872	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1873		return;
1874
1875	/*
1876	 * If our average think time is larger than the remaining time
1877	 * slice, then don't idle. This avoids overrunning the allotted
1878	 * time slice.
1879	 */
1880	if (sample_valid(cic->ttime_samples) &&
1881	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
1882		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
1883				cic->ttime_mean);
1884		return;
1885	}
1886
1887	cfq_mark_cfqq_wait_request(cfqq);
1888
1889	sl = cfqd->cfq_slice_idle;
1890
1891	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1892	blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1893	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1894}
1895
1896/*
1897 * Move request from internal lists to the request queue dispatch list.
1898 */
1899static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1900{
1901	struct cfq_data *cfqd = q->elevator->elevator_data;
1902	struct cfq_queue *cfqq = RQ_CFQQ(rq);
1903
1904	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1905
1906	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1907	cfq_remove_request(rq);
1908	cfqq->dispatched++;
1909	elv_dispatch_sort(q, rq);
1910
1911	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1912	blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
1913					rq_data_dir(rq), rq_is_sync(rq));
1914}
1915
1916/*
1917 * return expired entry, or NULL to just start from scratch in rbtree
1918 */
1919static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1920{
1921	struct request *rq = NULL;
1922
1923	if (cfq_cfqq_fifo_expire(cfqq))
1924		return NULL;
1925
1926	cfq_mark_cfqq_fifo_expire(cfqq);
1927
1928	if (list_empty(&cfqq->fifo))
1929		return NULL;
1930
1931	rq = rq_entry_fifo(cfqq->fifo.next);
1932	if (time_before(jiffies, rq_fifo_time(rq)))
1933		rq = NULL;
1934
1935	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
1936	return rq;
1937}
1938
1939static inline int
1940cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1941{
1942	const int base_rq = cfqd->cfq_slice_async_rq;
1943
1944	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1945
1946	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1947}
1948
1949/*
1950 * Must be called with the queue_lock held.
1951 */
1952static int cfqq_process_refs(struct cfq_queue *cfqq)
1953{
1954	int process_refs, io_refs;
1955
1956	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
1957	process_refs = atomic_read(&cfqq->ref) - io_refs;
1958	BUG_ON(process_refs < 0);
1959	return process_refs;
1960}
1961
1962static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1963{
1964	int process_refs, new_process_refs;
1965	struct cfq_queue *__cfqq;
1966
1967	/* Avoid a circular list and skip interim queue merges */
1968	while ((__cfqq = new_cfqq->new_cfqq)) {
1969		if (__cfqq == cfqq)
1970			return;
1971		new_cfqq = __cfqq;
1972	}
1973
1974	process_refs = cfqq_process_refs(cfqq);
1975	/*
1976	 * If the process for the cfqq has gone away, there is no
1977	 * sense in merging the queues.
1978	 */
1979	if (process_refs == 0)
1980		return;
1981
1982	/*
1983	 * Merge in the direction of the lesser amount of work.
1984	 */
1985	new_process_refs = cfqq_process_refs(new_cfqq);
1986	if (new_process_refs >= process_refs) {
1987		cfqq->new_cfqq = new_cfqq;
1988		atomic_add(process_refs, &new_cfqq->ref);
1989	} else {
1990		new_cfqq->new_cfqq = cfqq;
1991		atomic_add(new_process_refs, &cfqq->ref);
1992	}
1993}
1994
1995static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
1996				struct cfq_group *cfqg, enum wl_prio_t prio)
1997{
1998	struct cfq_queue *queue;
1999	int i;
2000	bool key_valid = false;
2001	unsigned long lowest_key = 0;
2002	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2003
2004	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2005		/* select the one with lowest rb_key */
2006		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2007		if (queue &&
2008		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
2009			lowest_key = queue->rb_key;
2010			cur_best = i;
2011			key_valid = true;
2012		}
2013	}
2014
2015	return cur_best;
2016}
2017
2018static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2019{
2020	unsigned slice;
2021	unsigned count;
2022	struct cfq_rb_root *st;
2023	unsigned group_slice;
2024
2025	if (!cfqg) {
2026		cfqd->serving_prio = IDLE_WORKLOAD;
2027		cfqd->workload_expires = jiffies + 1;
2028		return;
2029	}
2030
2031	/* Choose next priority. RT > BE > IDLE */
2032	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2033		cfqd->serving_prio = RT_WORKLOAD;
2034	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2035		cfqd->serving_prio = BE_WORKLOAD;
2036	else {
2037		cfqd->serving_prio = IDLE_WORKLOAD;
2038		cfqd->workload_expires = jiffies + 1;
2039		return;
2040	}
2041
2042	/*
2043	 * For RT and BE, we have to choose also the type
2044	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2045	 * expiration time
2046	 */
2047	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2048	count = st->count;
2049
2050	/*
2051	 * check workload expiration, and that we still have other queues ready
2052	 */
2053	if (count && !time_after(jiffies, cfqd->workload_expires))
2054		return;
2055
2056	/* otherwise select new workload type */
2057	cfqd->serving_type =
2058		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2059	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2060	count = st->count;
2061
2062	/*
2063	 * the workload slice is computed as a fraction of target latency
2064	 * proportional to the number of queues in that workload, over
2065	 * all the queues in the same priority class
2066	 */
2067	group_slice = cfq_group_slice(cfqd, cfqg);
2068
2069	slice = group_slice * count /
2070		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2071		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2072
2073	if (cfqd->serving_type == ASYNC_WORKLOAD) {
2074		unsigned int tmp;
2075
2076		/*
2077		 * Async queues are currently system wide. Just taking
2078		 * proportion of queues with-in same group will lead to higher
2079		 * async ratio system wide as generally root group is going
2080		 * to have higher weight. A more accurate thing would be to
2081		 * calculate system wide asnc/sync ratio.
2082		 */
2083		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2084		tmp = tmp/cfqd->busy_queues;
2085		slice = min_t(unsigned, slice, tmp);
2086
2087		/* async workload slice is scaled down according to
2088		 * the sync/async slice ratio. */
2089		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2090	} else
2091		/* sync workload slice is at least 2 * cfq_slice_idle */
2092		slice = max(slice, 2 * cfqd->cfq_slice_idle);
2093
2094	slice = max_t(unsigned, slice, CFQ_MIN_TT);
2095	cfq_log(cfqd, "workload slice:%d", slice);
2096	cfqd->workload_expires = jiffies + slice;
2097	cfqd->noidle_tree_requires_idle = false;
2098}
2099
2100static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2101{
2102	struct cfq_rb_root *st = &cfqd->grp_service_tree;
2103	struct cfq_group *cfqg;
2104
2105	if (RB_EMPTY_ROOT(&st->rb))
2106		return NULL;
2107	cfqg = cfq_rb_first_group(st);
2108	st->active = &cfqg->rb_node;
2109	update_min_vdisktime(st);
2110	return cfqg;
2111}
2112
2113static void cfq_choose_cfqg(struct cfq_data *cfqd)
2114{
2115	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2116
2117	cfqd->serving_group = cfqg;
2118
2119	/* Restore the workload type data */
2120	if (cfqg->saved_workload_slice) {
2121		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2122		cfqd->serving_type = cfqg->saved_workload;
2123		cfqd->serving_prio = cfqg->saved_serving_prio;
2124	} else
2125		cfqd->workload_expires = jiffies - 1;
2126
2127	choose_service_tree(cfqd, cfqg);
2128}
2129
2130/*
2131 * Select a queue for service. If we have a current active queue,
2132 * check whether to continue servicing it, or retrieve and set a new one.
2133 */
2134static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2135{
2136	struct cfq_queue *cfqq, *new_cfqq = NULL;
2137
2138	cfqq = cfqd->active_queue;
2139	if (!cfqq)
2140		goto new_queue;
2141
2142	if (!cfqd->rq_queued)
2143		return NULL;
2144
2145	/*
2146	 * We were waiting for group to get backlogged. Expire the queue
2147	 */
2148	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2149		goto expire;
2150
2151	/*
2152	 * The active queue has run out of time, expire it and select new.
2153	 */
2154	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2155		/*
2156		 * If slice had not expired at the completion of last request
2157		 * we might not have turned on wait_busy flag. Don't expire
2158		 * the queue yet. Allow the group to get backlogged.
2159		 *
2160		 * The very fact that we have used the slice, that means we
2161		 * have been idling all along on this queue and it should be
2162		 * ok to wait for this request to complete.
2163		 */
2164		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2165		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2166			cfqq = NULL;
2167			goto keep_queue;
2168		} else
2169			goto expire;
2170	}
2171
2172	/*
2173	 * The active queue has requests and isn't expired, allow it to
2174	 * dispatch.
2175	 */
2176	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2177		goto keep_queue;
2178
2179	/*
2180	 * If another queue has a request waiting within our mean seek
2181	 * distance, let it run.  The expire code will check for close
2182	 * cooperators and put the close queue at the front of the service
2183	 * tree.  If possible, merge the expiring queue with the new cfqq.
2184	 */
2185	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2186	if (new_cfqq) {
2187		if (!cfqq->new_cfqq)
2188			cfq_setup_merge(cfqq, new_cfqq);
2189		goto expire;
2190	}
2191
2192	/*
2193	 * No requests pending. If the active queue still has requests in
2194	 * flight or is idling for a new request, allow either of these
2195	 * conditions to happen (or time out) before selecting a new queue.
2196	 */
2197	if (timer_pending(&cfqd->idle_slice_timer) ||
2198	    (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
2199		cfqq = NULL;
2200		goto keep_queue;
2201	}
2202
2203expire:
2204	cfq_slice_expired(cfqd, 0);
2205new_queue:
2206	/*
2207	 * Current queue expired. Check if we have to switch to a new
2208	 * service tree
2209	 */
2210	if (!new_cfqq)
2211		cfq_choose_cfqg(cfqd);
2212
2213	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2214keep_queue:
2215	return cfqq;
2216}
2217
2218static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2219{
2220	int dispatched = 0;
2221
2222	while (cfqq->next_rq) {
2223		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2224		dispatched++;
2225	}
2226
2227	BUG_ON(!list_empty(&cfqq->fifo));
2228
2229	/* By default cfqq is not expired if it is empty. Do it explicitly */
2230	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2231	return dispatched;
2232}
2233
2234/*
2235 * Drain our current requests. Used for barriers and when switching
2236 * io schedulers on-the-fly.
2237 */
2238static int cfq_forced_dispatch(struct cfq_data *cfqd)
2239{
2240	struct cfq_queue *cfqq;
2241	int dispatched = 0;
2242
2243	/* Expire the timeslice of the current active queue first */
2244	cfq_slice_expired(cfqd, 0);
2245	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2246		__cfq_set_active_queue(cfqd, cfqq);
2247		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2248	}
2249
2250	BUG_ON(cfqd->busy_queues);
2251
2252	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2253	return dispatched;
2254}
2255
2256static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2257	struct cfq_queue *cfqq)
2258{
2259	/* the queue hasn't finished any request, can't estimate */
2260	if (cfq_cfqq_slice_new(cfqq))
2261		return 1;
2262	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2263		cfqq->slice_end))
2264		return 1;
2265
2266	return 0;
2267}
2268
2269static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2270{
2271	unsigned int max_dispatch;
2272
2273	/*
2274	 * Drain async requests before we start sync IO
2275	 */
2276	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2277		return false;
2278
2279	/*
2280	 * If this is an async queue and we have sync IO in flight, let it wait
2281	 */
2282	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2283		return false;
2284
2285	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2286	if (cfq_class_idle(cfqq))
2287		max_dispatch = 1;
2288
2289	/*
2290	 * Does this cfqq already have too much IO in flight?
2291	 */
2292	if (cfqq->dispatched >= max_dispatch) {
2293		/*
2294		 * idle queue must always only have a single IO in flight
2295		 */
2296		if (cfq_class_idle(cfqq))
2297			return false;
2298
2299		/*
2300		 * We have other queues, don't allow more IO from this one
2301		 */
2302		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
2303			return false;
2304
2305		/*
2306		 * Sole queue user, no limit
2307		 */
2308		if (cfqd->busy_queues == 1)
2309			max_dispatch = -1;
2310		else
2311			/*
2312			 * Normally we start throttling cfqq when cfq_quantum/2
2313			 * requests have been dispatched. But we can drive
2314			 * deeper queue depths at the beginning of slice
2315			 * subjected to upper limit of cfq_quantum.
2316			 * */
2317			max_dispatch = cfqd->cfq_quantum;
2318	}
2319
2320	/*
2321	 * Async queues must wait a bit before being allowed dispatch.
2322	 * We also ramp up the dispatch depth gradually for async IO,
2323	 * based on the last sync IO we serviced
2324	 */
2325	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2326		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2327		unsigned int depth;
2328
2329		depth = last_sync / cfqd->cfq_slice[1];
2330		if (!depth && !cfqq->dispatched)
2331			depth = 1;
2332		if (depth < max_dispatch)
2333			max_dispatch = depth;
2334	}
2335
2336	/*
2337	 * If we're below the current max, allow a dispatch
2338	 */
2339	return cfqq->dispatched < max_dispatch;
2340}
2341
2342/*
2343 * Dispatch a request from cfqq, moving them to the request queue
2344 * dispatch list.
2345 */
2346static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2347{
2348	struct request *rq;
2349
2350	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2351
2352	if (!cfq_may_dispatch(cfqd, cfqq))
2353		return false;
2354
2355	/*
2356	 * follow expired path, else get first next available
2357	 */
2358	rq = cfq_check_fifo(cfqq);
2359	if (!rq)
2360		rq = cfqq->next_rq;
2361
2362	/*
2363	 * insert request into driver dispatch list
2364	 */
2365	cfq_dispatch_insert(cfqd->queue, rq);
2366
2367	if (!cfqd->active_cic) {
2368		struct cfq_io_context *cic = RQ_CIC(rq);
2369
2370		atomic_long_inc(&cic->ioc->refcount);
2371		cfqd->active_cic = cic;
2372	}
2373
2374	return true;
2375}
2376
2377/*
2378 * Find the cfqq that we need to service and move a request from that to the
2379 * dispatch list
2380 */
2381static int cfq_dispatch_requests(struct request_queue *q, int force)
2382{
2383	struct cfq_data *cfqd = q->elevator->elevator_data;
2384	struct cfq_queue *cfqq;
2385
2386	if (!cfqd->busy_queues)
2387		return 0;
2388
2389	if (unlikely(force))
2390		return cfq_forced_dispatch(cfqd);
2391
2392	cfqq = cfq_select_queue(cfqd);
2393	if (!cfqq)
2394		return 0;
2395
2396	/*
2397	 * Dispatch a request from this cfqq, if it is allowed
2398	 */
2399	if (!cfq_dispatch_request(cfqd, cfqq))
2400		return 0;
2401
2402	cfqq->slice_dispatch++;
2403	cfq_clear_cfqq_must_dispatch(cfqq);
2404
2405	/*
2406	 * expire an async queue immediately if it has used up its slice. idle
2407	 * queue always expire after 1 dispatch round.
2408	 */
2409	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2410	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2411	    cfq_class_idle(cfqq))) {
2412		cfqq->slice_end = jiffies + 1;
2413		cfq_slice_expired(cfqd, 0);
2414	}
2415
2416	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2417	return 1;
2418}
2419
2420/*
2421 * task holds one reference to the queue, dropped when task exits. each rq
2422 * in-flight on this queue also holds a reference, dropped when rq is freed.
2423 *
2424 * Each cfq queue took a reference on the parent group. Drop it now.
2425 * queue lock must be held here.
2426 */
2427static void cfq_put_queue(struct cfq_queue *cfqq)
2428{
2429	struct cfq_data *cfqd = cfqq->cfqd;
2430	struct cfq_group *cfqg, *orig_cfqg;
2431
2432	BUG_ON(atomic_read(&cfqq->ref) <= 0);
2433
2434	if (!atomic_dec_and_test(&cfqq->ref))
2435		return;
2436
2437	cfq_log_cfqq(cfqd, cfqq, "put_queue");
2438	BUG_ON(rb_first(&cfqq->sort_list));
2439	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2440	cfqg = cfqq->cfqg;
2441	orig_cfqg = cfqq->orig_cfqg;
2442
2443	if (unlikely(cfqd->active_queue == cfqq)) {
2444		__cfq_slice_expired(cfqd, cfqq, 0);
2445		cfq_schedule_dispatch(cfqd);
2446	}
2447
2448	BUG_ON(cfq_cfqq_on_rr(cfqq));
2449	kmem_cache_free(cfq_pool, cfqq);
2450	cfq_put_cfqg(cfqg);
2451	if (orig_cfqg)
2452		cfq_put_cfqg(orig_cfqg);
2453}
2454
2455/*
2456 * Must always be called with the rcu_read_lock() held
2457 */
2458static void
2459__call_for_each_cic(struct io_context *ioc,
2460		    void (*func)(struct io_context *, struct cfq_io_context *))
2461{
2462	struct cfq_io_context *cic;
2463	struct hlist_node *n;
2464
2465	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2466		func(ioc, cic);
2467}
2468
2469/*
2470 * Call func for each cic attached to this ioc.
2471 */
2472static void
2473call_for_each_cic(struct io_context *ioc,
2474		  void (*func)(struct io_context *, struct cfq_io_context *))
2475{
2476	rcu_read_lock();
2477	__call_for_each_cic(ioc, func);
2478	rcu_read_unlock();
2479}
2480
2481static void cfq_cic_free_rcu(struct rcu_head *head)
2482{
2483	struct cfq_io_context *cic;
2484
2485	cic = container_of(head, struct cfq_io_context, rcu_head);
2486
2487	kmem_cache_free(cfq_ioc_pool, cic);
2488	elv_ioc_count_dec(cfq_ioc_count);
2489
2490	if (ioc_gone) {
2491		/*
2492		 * CFQ scheduler is exiting, grab exit lock and check
2493		 * the pending io context count. If it hits zero,
2494		 * complete ioc_gone and set it back to NULL
2495		 */
2496		spin_lock(&ioc_gone_lock);
2497		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2498			complete(ioc_gone);
2499			ioc_gone = NULL;
2500		}
2501		spin_unlock(&ioc_gone_lock);
2502	}
2503}
2504
2505static void cfq_cic_free(struct cfq_io_context *cic)
2506{
2507	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2508}
2509
2510static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2511{
2512	unsigned long flags;
2513
2514	BUG_ON(!cic->dead_key);
2515
2516	spin_lock_irqsave(&ioc->lock, flags);
2517	radix_tree_delete(&ioc->radix_root, cic->dead_key);
2518	hlist_del_rcu(&cic->cic_list);
2519	spin_unlock_irqrestore(&ioc->lock, flags);
2520
2521	cfq_cic_free(cic);
2522}
2523
2524/*
2525 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2526 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
2527 * and ->trim() which is called with the task lock held
2528 */
2529static void cfq_free_io_context(struct io_context *ioc)
2530{
2531	/*
2532	 * ioc->refcount is zero here, or we are called from elv_unregister(),
2533	 * so no more cic's are allowed to be linked into this ioc.  So it
2534	 * should be ok to iterate over the known list, we will see all cic's
2535	 * since no new ones are added.
2536	 */
2537	__call_for_each_cic(ioc, cic_free_func);
2538}
2539
2540static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2541{
2542	struct cfq_queue *__cfqq, *next;
2543
2544	if (unlikely(cfqq == cfqd->active_queue)) {
2545		__cfq_slice_expired(cfqd, cfqq, 0);
2546		cfq_schedule_dispatch(cfqd);
2547	}
2548
2549	/*
2550	 * If this queue was scheduled to merge with another queue, be
2551	 * sure to drop the reference taken on that queue (and others in
2552	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
2553	 */
2554	__cfqq = cfqq->new_cfqq;
2555	while (__cfqq) {
2556		if (__cfqq == cfqq) {
2557			WARN(1, "cfqq->new_cfqq loop detected\n");
2558			break;
2559		}
2560		next = __cfqq->new_cfqq;
2561		cfq_put_queue(__cfqq);
2562		__cfqq = next;
2563	}
2564
2565	cfq_put_queue(cfqq);
2566}
2567
2568static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2569					 struct cfq_io_context *cic)
2570{
2571	struct io_context *ioc = cic->ioc;
2572
2573	list_del_init(&cic->queue_list);
2574
2575	/*
2576	 * Make sure key == NULL is seen for dead queues
2577	 */
2578	smp_wmb();
2579	cic->dead_key = (unsigned long) cic->key;
2580	cic->key = NULL;
2581
2582	if (ioc->ioc_data == cic)
2583		rcu_assign_pointer(ioc->ioc_data, NULL);
2584
2585	if (cic->cfqq[BLK_RW_ASYNC]) {
2586		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2587		cic->cfqq[BLK_RW_ASYNC] = NULL;
2588	}
2589
2590	if (cic->cfqq[BLK_RW_SYNC]) {
2591		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2592		cic->cfqq[BLK_RW_SYNC] = NULL;
2593	}
2594}
2595
2596static void cfq_exit_single_io_context(struct io_context *ioc,
2597				       struct cfq_io_context *cic)
2598{
2599	struct cfq_data *cfqd = cic->key;
2600
2601	if (cfqd) {
2602		struct request_queue *q = cfqd->queue;
2603		unsigned long flags;
2604
2605		spin_lock_irqsave(q->queue_lock, flags);
2606
2607		/*
2608		 * Ensure we get a fresh copy of the ->key to prevent
2609		 * race between exiting task and queue
2610		 */
2611		smp_read_barrier_depends();
2612		if (cic->key)
2613			__cfq_exit_single_io_context(cfqd, cic);
2614
2615		spin_unlock_irqrestore(q->queue_lock, flags);
2616	}
2617}
2618
2619/*
2620 * The process that ioc belongs to has exited, we need to clean up
2621 * and put the internal structures we have that belongs to that process.
2622 */
2623static void cfq_exit_io_context(struct io_context *ioc)
2624{
2625	call_for_each_cic(ioc, cfq_exit_single_io_context);
2626}
2627
2628static struct cfq_io_context *
2629cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2630{
2631	struct cfq_io_context *cic;
2632
2633	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2634							cfqd->queue->node);
2635	if (cic) {
2636		cic->last_end_request = jiffies;
2637		INIT_LIST_HEAD(&cic->queue_list);
2638		INIT_HLIST_NODE(&cic->cic_list);
2639		cic->dtor = cfq_free_io_context;
2640		cic->exit = cfq_exit_io_context;
2641		elv_ioc_count_inc(cfq_ioc_count);
2642	}
2643
2644	return cic;
2645}
2646
2647static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2648{
2649	struct task_struct *tsk = current;
2650	int ioprio_class;
2651
2652	if (!cfq_cfqq_prio_changed(cfqq))
2653		return;
2654
2655	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2656	switch (ioprio_class) {
2657	default:
2658		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2659	case IOPRIO_CLASS_NONE:
2660		/*
2661		 * no prio set, inherit CPU scheduling settings
2662		 */
2663		cfqq->ioprio = task_nice_ioprio(tsk);
2664		cfqq->ioprio_class = task_nice_ioclass(tsk);
2665		break;
2666	case IOPRIO_CLASS_RT:
2667		cfqq->ioprio = task_ioprio(ioc);
2668		cfqq->ioprio_class = IOPRIO_CLASS_RT;
2669		break;
2670	case IOPRIO_CLASS_BE:
2671		cfqq->ioprio = task_ioprio(ioc);
2672		cfqq->ioprio_class = IOPRIO_CLASS_BE;
2673		break;
2674	case IOPRIO_CLASS_IDLE:
2675		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2676		cfqq->ioprio = 7;
2677		cfq_clear_cfqq_idle_window(cfqq);
2678		break;
2679	}
2680
2681	/*
2682	 * keep track of original prio settings in case we have to temporarily
2683	 * elevate the priority of this queue
2684	 */
2685	cfqq->org_ioprio = cfqq->ioprio;
2686	cfqq->org_ioprio_class = cfqq->ioprio_class;
2687	cfq_clear_cfqq_prio_changed(cfqq);
2688}
2689
2690static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2691{
2692	struct cfq_data *cfqd = cic->key;
2693	struct cfq_queue *cfqq;
2694	unsigned long flags;
2695
2696	if (unlikely(!cfqd))
2697		return;
2698
2699	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2700
2701	cfqq = cic->cfqq[BLK_RW_ASYNC];
2702	if (cfqq) {
2703		struct cfq_queue *new_cfqq;
2704		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2705						GFP_ATOMIC);
2706		if (new_cfqq) {
2707			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2708			cfq_put_queue(cfqq);
2709		}
2710	}
2711
2712	cfqq = cic->cfqq[BLK_RW_SYNC];
2713	if (cfqq)
2714		cfq_mark_cfqq_prio_changed(cfqq);
2715
2716	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2717}
2718
2719static void cfq_ioc_set_ioprio(struct io_context *ioc)
2720{
2721	call_for_each_cic(ioc, changed_ioprio);
2722	ioc->ioprio_changed = 0;
2723}
2724
2725static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2726			  pid_t pid, bool is_sync)
2727{
2728	RB_CLEAR_NODE(&cfqq->rb_node);
2729	RB_CLEAR_NODE(&cfqq->p_node);
2730	INIT_LIST_HEAD(&cfqq->fifo);
2731
2732	atomic_set(&cfqq->ref, 0);
2733	cfqq->cfqd = cfqd;
2734
2735	cfq_mark_cfqq_prio_changed(cfqq);
2736
2737	if (is_sync) {
2738		if (!cfq_class_idle(cfqq))
2739			cfq_mark_cfqq_idle_window(cfqq);
2740		cfq_mark_cfqq_sync(cfqq);
2741	}
2742	cfqq->pid = pid;
2743}
2744
2745#ifdef CONFIG_CFQ_GROUP_IOSCHED
2746static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2747{
2748	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2749	struct cfq_data *cfqd = cic->key;
2750	unsigned long flags;
2751	struct request_queue *q;
2752
2753	if (unlikely(!cfqd))
2754		return;
2755
2756	q = cfqd->queue;
2757
2758	spin_lock_irqsave(q->queue_lock, flags);
2759
2760	if (sync_cfqq) {
2761		/*
2762		 * Drop reference to sync queue. A new sync queue will be
2763		 * assigned in new group upon arrival of a fresh request.
2764		 */
2765		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2766		cic_set_cfqq(cic, NULL, 1);
2767		cfq_put_queue(sync_cfqq);
2768	}
2769
2770	spin_unlock_irqrestore(q->queue_lock, flags);
2771}
2772
2773static void cfq_ioc_set_cgroup(struct io_context *ioc)
2774{
2775	call_for_each_cic(ioc, changed_cgroup);
2776	ioc->cgroup_changed = 0;
2777}
2778#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
2779
2780static struct cfq_queue *
2781cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2782		     struct io_context *ioc, gfp_t gfp_mask)
2783{
2784	struct cfq_queue *cfqq, *new_cfqq = NULL;
2785	struct cfq_io_context *cic;
2786	struct cfq_group *cfqg;
2787
2788retry:
2789	cfqg = cfq_get_cfqg(cfqd, 1);
2790	cic = cfq_cic_lookup(cfqd, ioc);
2791	/* cic always exists here */
2792	cfqq = cic_to_cfqq(cic, is_sync);
2793
2794	/*
2795	 * Always try a new alloc if we fell back to the OOM cfqq
2796	 * originally, since it should just be a temporary situation.
2797	 */
2798	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2799		cfqq = NULL;
2800		if (new_cfqq) {
2801			cfqq = new_cfqq;
2802			new_cfqq = NULL;
2803		} else if (gfp_mask & __GFP_WAIT) {
2804			spin_unlock_irq(cfqd->queue->queue_lock);
2805			new_cfqq = kmem_cache_alloc_node(cfq_pool,
2806					gfp_mask | __GFP_ZERO,
2807					cfqd->queue->node);
2808			spin_lock_irq(cfqd->queue->queue_lock);
2809			if (new_cfqq)
2810				goto retry;
2811		} else {
2812			cfqq = kmem_cache_alloc_node(cfq_pool,
2813					gfp_mask | __GFP_ZERO,
2814					cfqd->queue->node);
2815		}
2816
2817		if (cfqq) {
2818			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2819			cfq_init_prio_data(cfqq, ioc);
2820			cfq_link_cfqq_cfqg(cfqq, cfqg);
2821			cfq_log_cfqq(cfqd, cfqq, "alloced");
2822		} else
2823			cfqq = &cfqd->oom_cfqq;
2824	}
2825
2826	if (new_cfqq)
2827		kmem_cache_free(cfq_pool, new_cfqq);
2828
2829	return cfqq;
2830}
2831
2832static struct cfq_queue **
2833cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2834{
2835	switch (ioprio_class) {
2836	case IOPRIO_CLASS_RT:
2837		return &cfqd->async_cfqq[0][ioprio];
2838	case IOPRIO_CLASS_BE:
2839		return &cfqd->async_cfqq[1][ioprio];
2840	case IOPRIO_CLASS_IDLE:
2841		return &cfqd->async_idle_cfqq;
2842	default:
2843		BUG();
2844	}
2845}
2846
2847static struct cfq_queue *
2848cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2849	      gfp_t gfp_mask)
2850{
2851	const int ioprio = task_ioprio(ioc);
2852	const int ioprio_class = task_ioprio_class(ioc);
2853	struct cfq_queue **async_cfqq = NULL;
2854	struct cfq_queue *cfqq = NULL;
2855
2856	if (!is_sync) {
2857		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2858		cfqq = *async_cfqq;
2859	}
2860
2861	if (!cfqq)
2862		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2863
2864	/*
2865	 * pin the queue now that it's allocated, scheduler exit will prune it
2866	 */
2867	if (!is_sync && !(*async_cfqq)) {
2868		atomic_inc(&cfqq->ref);
2869		*async_cfqq = cfqq;
2870	}
2871
2872	atomic_inc(&cfqq->ref);
2873	return cfqq;
2874}
2875
2876/*
2877 * We drop cfq io contexts lazily, so we may find a dead one.
2878 */
2879static void
2880cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2881		  struct cfq_io_context *cic)
2882{
2883	unsigned long flags;
2884
2885	WARN_ON(!list_empty(&cic->queue_list));
2886
2887	spin_lock_irqsave(&ioc->lock, flags);
2888
2889	BUG_ON(ioc->ioc_data == cic);
2890
2891	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
2892	hlist_del_rcu(&cic->cic_list);
2893	spin_unlock_irqrestore(&ioc->lock, flags);
2894
2895	cfq_cic_free(cic);
2896}
2897
2898static struct cfq_io_context *
2899cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
2900{
2901	struct cfq_io_context *cic;
2902	unsigned long flags;
2903	void *k;
2904
2905	if (unlikely(!ioc))
2906		return NULL;
2907
2908	rcu_read_lock();
2909
2910	/*
2911	 * we maintain a last-hit cache, to avoid browsing over the tree
2912	 */
2913	cic = rcu_dereference(ioc->ioc_data);
2914	if (cic && cic->key == cfqd) {
2915		rcu_read_unlock();
2916		return cic;
2917	}
2918
2919	do {
2920		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
2921		rcu_read_unlock();
2922		if (!cic)
2923			break;
2924		/* ->key must be copied to avoid race with cfq_exit_queue() */
2925		k = cic->key;
2926		if (unlikely(!k)) {
2927			cfq_drop_dead_cic(cfqd, ioc, cic);
2928			rcu_read_lock();
2929			continue;
2930		}
2931
2932		spin_lock_irqsave(&ioc->lock, flags);
2933		rcu_assign_pointer(ioc->ioc_data, cic);
2934		spin_unlock_irqrestore(&ioc->lock, flags);
2935		break;
2936	} while (1);
2937
2938	return cic;
2939}
2940
2941/*
2942 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
2943 * the process specific cfq io context when entered from the block layer.
2944 * Also adds the cic to a per-cfqd list, used when this queue is removed.
2945 */
2946static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
2947			struct cfq_io_context *cic, gfp_t gfp_mask)
2948{
2949	unsigned long flags;
2950	int ret;
2951
2952	ret = radix_tree_preload(gfp_mask);
2953	if (!ret) {
2954		cic->ioc = ioc;
2955		cic->key = cfqd;
2956
2957		spin_lock_irqsave(&ioc->lock, flags);
2958		ret = radix_tree_insert(&ioc->radix_root,
2959						(unsigned long) cfqd, cic);
2960		if (!ret)
2961			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
2962		spin_unlock_irqrestore(&ioc->lock, flags);
2963
2964		radix_tree_preload_end();
2965
2966		if (!ret) {
2967			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2968			list_add(&cic->queue_list, &cfqd->cic_list);
2969			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2970		}
2971	}
2972
2973	if (ret)
2974		printk(KERN_ERR "cfq: cic link failed!\n");
2975
2976	return ret;
2977}
2978
2979/*
2980 * Setup general io context and cfq io context. There can be several cfq
2981 * io contexts per general io context, if this process is doing io to more
2982 * than one device managed by cfq.
2983 */
2984static struct cfq_io_context *
2985cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2986{
2987	struct io_context *ioc = NULL;
2988	struct cfq_io_context *cic;
2989
2990	might_sleep_if(gfp_mask & __GFP_WAIT);
2991
2992	ioc = get_io_context(gfp_mask, cfqd->queue->node);
2993	if (!ioc)
2994		return NULL;
2995
2996	cic = cfq_cic_lookup(cfqd, ioc);
2997	if (cic)
2998		goto out;
2999
3000	cic = cfq_alloc_io_context(cfqd, gfp_mask);
3001	if (cic == NULL)
3002		goto err;
3003
3004	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3005		goto err_free;
3006
3007out:
3008	smp_read_barrier_depends();
3009	if (unlikely(ioc->ioprio_changed))
3010		cfq_ioc_set_ioprio(ioc);
3011
3012#ifdef CONFIG_CFQ_GROUP_IOSCHED
3013	if (unlikely(ioc->cgroup_changed))
3014		cfq_ioc_set_cgroup(ioc);
3015#endif
3016	return cic;
3017err_free:
3018	cfq_cic_free(cic);
3019err:
3020	put_io_context(ioc);
3021	return NULL;
3022}
3023
3024static void
3025cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3026{
3027	unsigned long elapsed = jiffies - cic->last_end_request;
3028	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3029
3030	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
3031	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
3032	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
3033}
3034
3035static void
3036cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3037		       struct request *rq)
3038{
3039	sector_t sdist = 0;
3040	sector_t n_sec = blk_rq_sectors(rq);
3041	if (cfqq->last_request_pos) {
3042		if (cfqq->last_request_pos < blk_rq_pos(rq))
3043			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3044		else
3045			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3046	}
3047
3048	cfqq->seek_history <<= 1;
3049	if (blk_queue_nonrot(cfqd->queue))
3050		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3051	else
3052		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3053}
3054
3055/*
3056 * Disable idle window if the process thinks too long or seeks so much that
3057 * it doesn't matter
3058 */
3059static void
3060cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3061		       struct cfq_io_context *cic)
3062{
3063	int old_idle, enable_idle;
3064
3065	/*
3066	 * Don't idle for async or idle io prio class
3067	 */
3068	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3069		return;
3070
3071	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3072
3073	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3074		cfq_mark_cfqq_deep(cfqq);
3075
3076	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3077	    (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3078		enable_idle = 0;
3079	else if (sample_valid(cic->ttime_samples)) {
3080		if (cic->ttime_mean > cfqd->cfq_slice_idle)
3081			enable_idle = 0;
3082		else
3083			enable_idle = 1;
3084	}
3085
3086	if (old_idle != enable_idle) {
3087		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3088		if (enable_idle)
3089			cfq_mark_cfqq_idle_window(cfqq);
3090		else
3091			cfq_clear_cfqq_idle_window(cfqq);
3092	}
3093}
3094
3095/*
3096 * Check if new_cfqq should preempt the currently active queue. Return 0 for
3097 * no or if we aren't sure, a 1 will cause a preempt.
3098 */
3099static bool
3100cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3101		   struct request *rq)
3102{
3103	struct cfq_queue *cfqq;
3104
3105	cfqq = cfqd->active_queue;
3106	if (!cfqq)
3107		return false;
3108
3109	if (cfq_class_idle(new_cfqq))
3110		return false;
3111
3112	if (cfq_class_idle(cfqq))
3113		return true;
3114
3115	/*
3116	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3117	 */
3118	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3119		return false;
3120
3121	/*
3122	 * if the new request is sync, but the currently running queue is
3123	 * not, let the sync request have priority.
3124	 */
3125	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3126		return true;
3127
3128	if (new_cfqq->cfqg != cfqq->cfqg)
3129		return false;
3130
3131	if (cfq_slice_used(cfqq))
3132		return true;
3133
3134	/* Allow preemption only if we are idling on sync-noidle tree */
3135	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3136	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3137	    new_cfqq->service_tree->count == 2 &&
3138	    RB_EMPTY_ROOT(&cfqq->sort_list))
3139		return true;
3140
3141	/*
3142	 * So both queues are sync. Let the new request get disk time if
3143	 * it's a metadata request and the current queue is doing regular IO.
3144	 */
3145	if (rq_is_meta(rq) && !cfqq->meta_pending)
3146		return true;
3147
3148	/*
3149	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3150	 */
3151	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3152		return true;
3153
3154	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3155		return false;
3156
3157	/*
3158	 * if this request is as-good as one we would expect from the
3159	 * current cfqq, let it preempt
3160	 */
3161	if (cfq_rq_close(cfqd, cfqq, rq))
3162		return true;
3163
3164	return false;
3165}
3166
3167/*
3168 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3169 * let it have half of its nominal slice.
3170 */
3171static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3172{
3173	cfq_log_cfqq(cfqd, cfqq, "preempt");
3174	cfq_slice_expired(cfqd, 1);
3175
3176	/*
3177	 * Put the new queue at the front of the of the current list,
3178	 * so we know that it will be selected next.
3179	 */
3180	BUG_ON(!cfq_cfqq_on_rr(cfqq));
3181
3182	cfq_service_tree_add(cfqd, cfqq, 1);
3183
3184	cfqq->slice_end = 0;
3185	cfq_mark_cfqq_slice_new(cfqq);
3186}
3187
3188/*
3189 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3190 * something we should do about it
3191 */
3192static void
3193cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3194		struct request *rq)
3195{
3196	struct cfq_io_context *cic = RQ_CIC(rq);
3197
3198	cfqd->rq_queued++;
3199	if (rq_is_meta(rq))
3200		cfqq->meta_pending++;
3201
3202	cfq_update_io_thinktime(cfqd, cic);
3203	cfq_update_io_seektime(cfqd, cfqq, rq);
3204	cfq_update_idle_window(cfqd, cfqq, cic);
3205
3206	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3207
3208	if (cfqq == cfqd->active_queue) {
3209		/*
3210		 * Remember that we saw a request from this process, but
3211		 * don't start queuing just yet. Otherwise we risk seeing lots
3212		 * of tiny requests, because we disrupt the normal plugging
3213		 * and merging. If the request is already larger than a single
3214		 * page, let it rip immediately. For that case we assume that
3215		 * merging is already done. Ditto for a busy system that
3216		 * has other work pending, don't risk delaying until the
3217		 * idle timer unplug to continue working.
3218		 */
3219		if (cfq_cfqq_wait_request(cfqq)) {
3220			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3221			    cfqd->busy_queues > 1) {
3222				cfq_del_timer(cfqd, cfqq);
3223				cfq_clear_cfqq_wait_request(cfqq);
3224				__blk_run_queue(cfqd->queue);
3225			} else {
3226				blkiocg_update_idle_time_stats(
3227						&cfqq->cfqg->blkg);
3228				cfq_mark_cfqq_must_dispatch(cfqq);
3229			}
3230		}
3231	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3232		/*
3233		 * not the active queue - expire current slice if it is
3234		 * idle and has expired it's mean thinktime or this new queue
3235		 * has some old slice time left and is of higher priority or
3236		 * this new queue is RT and the current one is BE
3237		 */
3238		cfq_preempt_queue(cfqd, cfqq);
3239		__blk_run_queue(cfqd->queue);
3240	}
3241}
3242
3243static void cfq_insert_request(struct request_queue *q, struct request *rq)
3244{
3245	struct cfq_data *cfqd = q->elevator->elevator_data;
3246	struct cfq_queue *cfqq = RQ_CFQQ(rq);
3247
3248	cfq_log_cfqq(cfqd, cfqq, "insert_request");
3249	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3250
3251	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3252	list_add_tail(&rq->queuelist, &cfqq->fifo);
3253	cfq_add_rq_rb(rq);
3254	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3255			&cfqd->serving_group->blkg, rq_data_dir(rq),
3256			rq_is_sync(rq));
3257	cfq_rq_enqueued(cfqd, cfqq, rq);
3258}
3259
3260/*
3261 * Update hw_tag based on peak queue depth over 50 samples under
3262 * sufficient load.
3263 */
3264static void cfq_update_hw_tag(struct cfq_data *cfqd)
3265{
3266	struct cfq_queue *cfqq = cfqd->active_queue;
3267
3268	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3269		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3270
3271	if (cfqd->hw_tag == 1)
3272		return;
3273
3274	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3275	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3276		return;
3277
3278	/*
3279	 * If active queue hasn't enough requests and can idle, cfq might not
3280	 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3281	 * case
3282	 */
3283	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3284	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3285	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3286		return;
3287
3288	if (cfqd->hw_tag_samples++ < 50)
3289		return;
3290
3291	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3292		cfqd->hw_tag = 1;
3293	else
3294		cfqd->hw_tag = 0;
3295}
3296
3297static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3298{
3299	struct cfq_io_context *cic = cfqd->active_cic;
3300
3301	/* If there are other queues in the group, don't wait */
3302	if (cfqq->cfqg->nr_cfqq > 1)
3303		return false;
3304
3305	if (cfq_slice_used(cfqq))
3306		return true;
3307
3308	/* if slice left is less than think time, wait busy */
3309	if (cic && sample_valid(cic->ttime_samples)
3310	    && (cfqq->slice_end - jiffies < cic->ttime_mean))
3311		return true;
3312
3313	/*
3314	 * If think times is less than a jiffy than ttime_mean=0 and above
3315	 * will not be true. It might happen that slice has not expired yet
3316	 * but will expire soon (4-5 ns) during select_queue(). To cover the
3317	 * case where think time is less than a jiffy, mark the queue wait
3318	 * busy if only 1 jiffy is left in the slice.
3319	 */
3320	if (cfqq->slice_end - jiffies == 1)
3321		return true;
3322
3323	return false;
3324}
3325
3326static void cfq_completed_request(struct request_queue *q, struct request *rq)
3327{
3328	struct cfq_queue *cfqq = RQ_CFQQ(rq);
3329	struct cfq_data *cfqd = cfqq->cfqd;
3330	const int sync = rq_is_sync(rq);
3331	unsigned long now;
3332
3333	now = jiffies;
3334	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
3335
3336	cfq_update_hw_tag(cfqd);
3337
3338	WARN_ON(!cfqd->rq_in_driver);
3339	WARN_ON(!cfqq->dispatched);
3340	cfqd->rq_in_driver--;
3341	cfqq->dispatched--;
3342	blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
3343			rq_io_start_time_ns(rq), rq_data_dir(rq),
3344			rq_is_sync(rq));
3345
3346	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3347
3348	if (sync) {
3349		RQ_CIC(rq)->last_end_request = now;
3350		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3351			cfqd->last_delayed_sync = now;
3352	}
3353
3354	/*
3355	 * If this is the active queue, check if it needs to be expired,
3356	 * or if we want to idle in case it has no pending requests.
3357	 */
3358	if (cfqd->active_queue == cfqq) {
3359		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3360
3361		if (cfq_cfqq_slice_new(cfqq)) {
3362			cfq_set_prio_slice(cfqd, cfqq);
3363			cfq_clear_cfqq_slice_new(cfqq);
3364		}
3365
3366		/*
3367		 * Should we wait for next request to come in before we expire
3368		 * the queue.
3369		 */
3370		if (cfq_should_wait_busy(cfqd, cfqq)) {
3371			cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
3372			cfq_mark_cfqq_wait_busy(cfqq);
3373			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3374		}
3375
3376		/*
3377		 * Idling is not enabled on:
3378		 * - expired queues
3379		 * - idle-priority queues
3380		 * - async queues
3381		 * - queues with still some requests queued
3382		 * - when there is a close cooperator
3383		 */
3384		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3385			cfq_slice_expired(cfqd, 1);
3386		else if (sync && cfqq_empty &&
3387			 !cfq_close_cooperator(cfqd, cfqq)) {
3388			cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
3389			/*
3390			 * Idling is enabled for SYNC_WORKLOAD.
3391			 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
3392			 * only if we processed at least one !rq_noidle request
3393			 */
3394			if (cfqd->serving_type == SYNC_WORKLOAD
3395			    || cfqd->noidle_tree_requires_idle
3396			    || cfqq->cfqg->nr_cfqq == 1)
3397				cfq_arm_slice_timer(cfqd);
3398		}
3399	}
3400
3401	if (!cfqd->rq_in_driver)
3402		cfq_schedule_dispatch(cfqd);
3403}
3404
3405/*
3406 * we temporarily boost lower priority queues if they are holding fs exclusive
3407 * resources. they are boosted to normal prio (CLASS_BE/4)
3408 */
3409static void cfq_prio_boost(struct cfq_queue *cfqq)
3410{
3411	if (has_fs_excl()) {
3412		/*
3413		 * boost idle prio on transactions that would lock out other
3414		 * users of the filesystem
3415		 */
3416		if (cfq_class_idle(cfqq))
3417			cfqq->ioprio_class = IOPRIO_CLASS_BE;
3418		if (cfqq->ioprio > IOPRIO_NORM)
3419			cfqq->ioprio = IOPRIO_NORM;
3420	} else {
3421		/*
3422		 * unboost the queue (if needed)
3423		 */
3424		cfqq->ioprio_class = cfqq->org_ioprio_class;
3425		cfqq->ioprio = cfqq->org_ioprio;
3426	}
3427}
3428
3429static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3430{
3431	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3432		cfq_mark_cfqq_must_alloc_slice(cfqq);
3433		return ELV_MQUEUE_MUST;
3434	}
3435
3436	return ELV_MQUEUE_MAY;
3437}
3438
3439static int cfq_may_queue(struct request_queue *q, int rw)
3440{
3441	struct cfq_data *cfqd = q->elevator->elevator_data;
3442	struct task_struct *tsk = current;
3443	struct cfq_io_context *cic;
3444	struct cfq_queue *cfqq;
3445
3446	/*
3447	 * don't force setup of a queue from here, as a call to may_queue
3448	 * does not necessarily imply that a request actually will be queued.
3449	 * so just lookup a possibly existing queue, or return 'may queue'
3450	 * if that fails
3451	 */
3452	cic = cfq_cic_lookup(cfqd, tsk->io_context);
3453	if (!cic)
3454		return ELV_MQUEUE_MAY;
3455
3456	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3457	if (cfqq) {
3458		cfq_init_prio_data(cfqq, cic->ioc);
3459		cfq_prio_boost(cfqq);
3460
3461		return __cfq_may_queue(cfqq);
3462	}
3463
3464	return ELV_MQUEUE_MAY;
3465}
3466
3467/*
3468 * queue lock held here
3469 */
3470static void cfq_put_request(struct request *rq)
3471{
3472	struct cfq_queue *cfqq = RQ_CFQQ(rq);
3473
3474	if (cfqq) {
3475		const int rw = rq_data_dir(rq);
3476
3477		BUG_ON(!cfqq->allocated[rw]);
3478		cfqq->allocated[rw]--;
3479
3480		put_io_context(RQ_CIC(rq)->ioc);
3481
3482		rq->elevator_private = NULL;
3483		rq->elevator_private2 = NULL;
3484
3485		/* Put down rq reference on cfqg */
3486		cfq_put_cfqg(RQ_CFQG(rq));
3487		rq->elevator_private3 = NULL;
3488
3489		cfq_put_queue(cfqq);
3490	}
3491}
3492
3493static struct cfq_queue *
3494cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3495		struct cfq_queue *cfqq)
3496{
3497	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3498	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3499	cfq_mark_cfqq_coop(cfqq->new_cfqq);
3500	cfq_put_queue(cfqq);
3501	return cic_to_cfqq(cic, 1);
3502}
3503
3504/*
3505 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3506 * was the last process referring to said cfqq.
3507 */
3508static struct cfq_queue *
3509split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3510{
3511	if (cfqq_process_refs(cfqq) == 1) {
3512		cfqq->pid = current->pid;
3513		cfq_clear_cfqq_coop(cfqq);
3514		cfq_clear_cfqq_split_coop(cfqq);
3515		return cfqq;
3516	}
3517
3518	cic_set_cfqq(cic, NULL, 1);
3519	cfq_put_queue(cfqq);
3520	return NULL;
3521}
3522/*
3523 * Allocate cfq data structures associated with this request.
3524 */
3525static int
3526cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3527{
3528	struct cfq_data *cfqd = q->elevator->elevator_data;
3529	struct cfq_io_context *cic;
3530	const int rw = rq_data_dir(rq);
3531	const bool is_sync = rq_is_sync(rq);
3532	struct cfq_queue *cfqq;
3533	unsigned long flags;
3534
3535	might_sleep_if(gfp_mask & __GFP_WAIT);
3536
3537	cic = cfq_get_io_context(cfqd, gfp_mask);
3538
3539	spin_lock_irqsave(q->queue_lock, flags);
3540
3541	if (!cic)
3542		goto queue_fail;
3543
3544new_queue:
3545	cfqq = cic_to_cfqq(cic, is_sync);
3546	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3547		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3548		cic_set_cfqq(cic, cfqq, is_sync);
3549	} else {
3550		/*
3551		 * If the queue was seeky for too long, break it apart.
3552		 */
3553		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3554			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3555			cfqq = split_cfqq(cic, cfqq);
3556			if (!cfqq)
3557				goto new_queue;
3558		}
3559
3560		/*
3561		 * Check to see if this queue is scheduled to merge with
3562		 * another, closely cooperating queue.  The merging of
3563		 * queues happens here as it must be done in process context.
3564		 * The reference on new_cfqq was taken in merge_cfqqs.
3565		 */
3566		if (cfqq->new_cfqq)
3567			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3568	}
3569
3570	cfqq->allocated[rw]++;
3571	atomic_inc(&cfqq->ref);
3572
3573	spin_unlock_irqrestore(q->queue_lock, flags);
3574
3575	rq->elevator_private = cic;
3576	rq->elevator_private2 = cfqq;
3577	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
3578	return 0;
3579
3580queue_fail:
3581	if (cic)
3582		put_io_context(cic->ioc);
3583
3584	cfq_schedule_dispatch(cfqd);
3585	spin_unlock_irqrestore(q->queue_lock, flags);
3586	cfq_log(cfqd, "set_request fail");
3587	return 1;
3588}
3589
3590static void cfq_kick_queue(struct work_struct *work)
3591{
3592	struct cfq_data *cfqd =
3593		container_of(work, struct cfq_data, unplug_work);
3594	struct request_queue *q = cfqd->queue;
3595
3596	spin_lock_irq(q->queue_lock);
3597	__blk_run_queue(cfqd->queue);
3598	spin_unlock_irq(q->queue_lock);
3599}
3600
3601/*
3602 * Timer running if the active_queue is currently idling inside its time slice
3603 */
3604static void cfq_idle_slice_timer(unsigned long data)
3605{
3606	struct cfq_data *cfqd = (struct cfq_data *) data;
3607	struct cfq_queue *cfqq;
3608	unsigned long flags;
3609	int timed_out = 1;
3610
3611	cfq_log(cfqd, "idle timer fired");
3612
3613	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3614
3615	cfqq = cfqd->active_queue;
3616	if (cfqq) {
3617		timed_out = 0;
3618
3619		/*
3620		 * We saw a request before the queue expired, let it through
3621		 */
3622		if (cfq_cfqq_must_dispatch(cfqq))
3623			goto out_kick;
3624
3625		/*
3626		 * expired
3627		 */
3628		if (cfq_slice_used(cfqq))
3629			goto expire;
3630
3631		/*
3632		 * only expire and reinvoke request handler, if there are
3633		 * other queues with pending requests
3634		 */
3635		if (!cfqd->busy_queues)
3636			goto out_cont;
3637
3638		/*
3639		 * not expired and it has a request pending, let it dispatch
3640		 */
3641		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3642			goto out_kick;
3643
3644		/*
3645		 * Queue depth flag is reset only when the idle didn't succeed
3646		 */
3647		cfq_clear_cfqq_deep(cfqq);
3648	}
3649expire:
3650	cfq_slice_expired(cfqd, timed_out);
3651out_kick:
3652	cfq_schedule_dispatch(cfqd);
3653out_cont:
3654	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3655}
3656
3657static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3658{
3659	del_timer_sync(&cfqd->idle_slice_timer);
3660	cancel_work_sync(&cfqd->unplug_work);
3661}
3662
3663static void cfq_put_async_queues(struct cfq_data *cfqd)
3664{
3665	int i;
3666
3667	for (i = 0; i < IOPRIO_BE_NR; i++) {
3668		if (cfqd->async_cfqq[0][i])
3669			cfq_put_queue(cfqd->async_cfqq[0][i]);
3670		if (cfqd->async_cfqq[1][i])
3671			cfq_put_queue(cfqd->async_cfqq[1][i]);
3672	}
3673
3674	if (cfqd->async_idle_cfqq)
3675		cfq_put_queue(cfqd->async_idle_cfqq);
3676}
3677
3678static void cfq_cfqd_free(struct rcu_head *head)
3679{
3680	kfree(container_of(head, struct cfq_data, rcu));
3681}
3682
3683static void cfq_exit_queue(struct elevator_queue *e)
3684{
3685	struct cfq_data *cfqd = e->elevator_data;
3686	struct request_queue *q = cfqd->queue;
3687
3688	cfq_shutdown_timer_wq(cfqd);
3689
3690	spin_lock_irq(q->queue_lock);
3691
3692	if (cfqd->active_queue)
3693		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3694
3695	while (!list_empty(&cfqd->cic_list)) {
3696		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3697							struct cfq_io_context,
3698							queue_list);
3699
3700		__cfq_exit_single_io_context(cfqd, cic);
3701	}
3702
3703	cfq_put_async_queues(cfqd);
3704	cfq_release_cfq_groups(cfqd);
3705	blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3706
3707	spin_unlock_irq(q->queue_lock);
3708
3709	cfq_shutdown_timer_wq(cfqd);
3710
3711	/* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3712	call_rcu(&cfqd->rcu, cfq_cfqd_free);
3713}
3714
3715static void *cfq_init_queue(struct request_queue *q)
3716{
3717	struct cfq_data *cfqd;
3718	int i, j;
3719	struct cfq_group *cfqg;
3720	struct cfq_rb_root *st;
3721
3722	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3723	if (!cfqd)
3724		return NULL;
3725
3726	/* Init root service tree */
3727	cfqd->grp_service_tree = CFQ_RB_ROOT;
3728
3729	/* Init root group */
3730	cfqg = &cfqd->root_group;
3731	for_each_cfqg_st(cfqg, i, j, st)
3732		*st = CFQ_RB_ROOT;
3733	RB_CLEAR_NODE(&cfqg->rb_node);
3734
3735	/* Give preference to root group over other groups */
3736	cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3737
3738#ifdef CONFIG_CFQ_GROUP_IOSCHED
3739	/*
3740	 * Take a reference to root group which we never drop. This is just
3741	 * to make sure that cfq_put_cfqg() does not try to kfree root group
3742	 */
3743	atomic_set(&cfqg->ref, 1);
3744	blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
3745					0);
3746#endif
3747	/*
3748	 * Not strictly needed (since RB_ROOT just clears the node and we
3749	 * zeroed cfqd on alloc), but better be safe in case someone decides
3750	 * to add magic to the rb code
3751	 */
3752	for (i = 0; i < CFQ_PRIO_LISTS; i++)
3753		cfqd->prio_trees[i] = RB_ROOT;
3754
3755	/*
3756	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3757	 * Grab a permanent reference to it, so that the normal code flow
3758	 * will not attempt to free it.
3759	 */
3760	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3761	atomic_inc(&cfqd->oom_cfqq.ref);
3762	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3763
3764	INIT_LIST_HEAD(&cfqd->cic_list);
3765
3766	cfqd->queue = q;
3767
3768	init_timer(&cfqd->idle_slice_timer);
3769	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3770	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3771
3772	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3773
3774	cfqd->cfq_quantum = cfq_quantum;
3775	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3776	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3777	cfqd->cfq_back_max = cfq_back_max;
3778	cfqd->cfq_back_penalty = cfq_back_penalty;
3779	cfqd->cfq_slice[0] = cfq_slice_async;
3780	cfqd->cfq_slice[1] = cfq_slice_sync;
3781	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3782	cfqd->cfq_slice_idle = cfq_slice_idle;
3783	cfqd->cfq_latency = 1;
3784	cfqd->cfq_group_isolation = 0;
3785	cfqd->hw_tag = -1;
3786	/*
3787	 * we optimistically start assuming sync ops weren't delayed in last
3788	 * second, in order to have larger depth for async operations.
3789	 */
3790	cfqd->last_delayed_sync = jiffies - HZ;
3791	INIT_RCU_HEAD(&cfqd->rcu);
3792	return cfqd;
3793}
3794
3795static void cfq_slab_kill(void)
3796{
3797	/*
3798	 * Caller already ensured that pending RCU callbacks are completed,
3799	 * so we should have no busy allocations at this point.
3800	 */
3801	if (cfq_pool)
3802		kmem_cache_destroy(cfq_pool);
3803	if (cfq_ioc_pool)
3804		kmem_cache_destroy(cfq_ioc_pool);
3805}
3806
3807static int __init cfq_slab_setup(void)
3808{
3809	cfq_pool = KMEM_CACHE(cfq_queue, 0);
3810	if (!cfq_pool)
3811		goto fail;
3812
3813	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3814	if (!cfq_ioc_pool)
3815		goto fail;
3816
3817	return 0;
3818fail:
3819	cfq_slab_kill();
3820	return -ENOMEM;
3821}
3822
3823/*
3824 * sysfs parts below -->
3825 */
3826static ssize_t
3827cfq_var_show(unsigned int var, char *page)
3828{
3829	return sprintf(page, "%d\n", var);
3830}
3831
3832static ssize_t
3833cfq_var_store(unsigned int *var, const char *page, size_t count)
3834{
3835	char *p = (char *) page;
3836
3837	*var = simple_strtoul(p, &p, 10);
3838	return count;
3839}
3840
3841#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
3842static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
3843{									\
3844	struct cfq_data *cfqd = e->elevator_data;			\
3845	unsigned int __data = __VAR;					\
3846	if (__CONV)							\
3847		__data = jiffies_to_msecs(__data);			\
3848	return cfq_var_show(__data, (page));				\
3849}
3850SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3851SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3852SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3853SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3854SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3855SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3856SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3857SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3858SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3859SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3860SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
3861#undef SHOW_FUNCTION
3862
3863#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
3864static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
3865{									\
3866	struct cfq_data *cfqd = e->elevator_data;			\
3867	unsigned int __data;						\
3868	int ret = cfq_var_store(&__data, (page), count);		\
3869	if (__data < (MIN))						\
3870		__data = (MIN);						\
3871	else if (__data > (MAX))					\
3872		__data = (MAX);						\
3873	if (__CONV)							\
3874		*(__PTR) = msecs_to_jiffies(__data);			\
3875	else								\
3876		*(__PTR) = __data;					\
3877	return ret;							\
3878}
3879STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3880STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3881		UINT_MAX, 1);
3882STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3883		UINT_MAX, 1);
3884STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3885STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3886		UINT_MAX, 0);
3887STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3888STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3889STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3890STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3891		UINT_MAX, 0);
3892STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3893STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
3894#undef STORE_FUNCTION
3895
3896#define CFQ_ATTR(name) \
3897	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3898
3899static struct elv_fs_entry cfq_attrs[] = {
3900	CFQ_ATTR(quantum),
3901	CFQ_ATTR(fifo_expire_sync),
3902	CFQ_ATTR(fifo_expire_async),
3903	CFQ_ATTR(back_seek_max),
3904	CFQ_ATTR(back_seek_penalty),
3905	CFQ_ATTR(slice_sync),
3906	CFQ_ATTR(slice_async),
3907	CFQ_ATTR(slice_async_rq),
3908	CFQ_ATTR(slice_idle),
3909	CFQ_ATTR(low_latency),
3910	CFQ_ATTR(group_isolation),
3911	__ATTR_NULL
3912};
3913
3914static struct elevator_type iosched_cfq = {
3915	.ops = {
3916		.elevator_merge_fn = 		cfq_merge,
3917		.elevator_merged_fn =		cfq_merged_request,
3918		.elevator_merge_req_fn =	cfq_merged_requests,
3919		.elevator_allow_merge_fn =	cfq_allow_merge,
3920		.elevator_bio_merged_fn =	cfq_bio_merged,
3921		.elevator_dispatch_fn =		cfq_dispatch_requests,
3922		.elevator_add_req_fn =		cfq_insert_request,
3923		.elevator_activate_req_fn =	cfq_activate_request,
3924		.elevator_deactivate_req_fn =	cfq_deactivate_request,
3925		.elevator_queue_empty_fn =	cfq_queue_empty,
3926		.elevator_completed_req_fn =	cfq_completed_request,
3927		.elevator_former_req_fn =	elv_rb_former_request,
3928		.elevator_latter_req_fn =	elv_rb_latter_request,
3929		.elevator_set_req_fn =		cfq_set_request,
3930		.elevator_put_req_fn =		cfq_put_request,
3931		.elevator_may_queue_fn =	cfq_may_queue,
3932		.elevator_init_fn =		cfq_init_queue,
3933		.elevator_exit_fn =		cfq_exit_queue,
3934		.trim =				cfq_free_io_context,
3935	},
3936	.elevator_attrs =	cfq_attrs,
3937	.elevator_name =	"cfq",
3938	.elevator_owner =	THIS_MODULE,
3939};
3940
3941#ifdef CONFIG_CFQ_GROUP_IOSCHED
3942static struct blkio_policy_type blkio_policy_cfq = {
3943	.ops = {
3944		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
3945		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
3946	},
3947};
3948#else
3949static struct blkio_policy_type blkio_policy_cfq;
3950#endif
3951
3952static int __init cfq_init(void)
3953{
3954	/*
3955	 * could be 0 on HZ < 1000 setups
3956	 */
3957	if (!cfq_slice_async)
3958		cfq_slice_async = 1;
3959	if (!cfq_slice_idle)
3960		cfq_slice_idle = 1;
3961
3962	if (cfq_slab_setup())
3963		return -ENOMEM;
3964
3965	elv_register(&iosched_cfq);
3966	blkio_policy_register(&blkio_policy_cfq);
3967
3968	return 0;
3969}
3970
3971static void __exit cfq_exit(void)
3972{
3973	DECLARE_COMPLETION_ONSTACK(all_gone);
3974	blkio_policy_unregister(&blkio_policy_cfq);
3975	elv_unregister(&iosched_cfq);
3976	ioc_gone = &all_gone;
3977	/* ioc_gone's update must be visible before reading ioc_count */
3978	smp_wmb();
3979
3980	/*
3981	 * this also protects us from entering cfq_slab_kill() with
3982	 * pending RCU callbacks
3983	 */
3984	if (elv_ioc_count_read(cfq_ioc_count))
3985		wait_for_completion(&all_gone);
3986	cfq_slab_kill();
3987}
3988
3989module_init(cfq_init);
3990module_exit(cfq_exit);
3991
3992MODULE_AUTHOR("Jens Axboe");
3993MODULE_LICENSE("GPL");
3994MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
3995