cfq-iosched.c revision 76280aff1c7e9ae761cac4b48591c43cd7d69159
1/* 2 * CFQ, or complete fairness queueing, disk scheduler. 3 * 4 * Based on ideas from a previously unfinished io 5 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli. 6 * 7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> 8 */ 9#include <linux/module.h> 10#include <linux/blkdev.h> 11#include <linux/elevator.h> 12#include <linux/jiffies.h> 13#include <linux/rbtree.h> 14#include <linux/ioprio.h> 15#include <linux/blktrace_api.h> 16 17/* 18 * tunables 19 */ 20/* max queue in one round of service */ 21static const int cfq_quantum = 4; 22static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; 23/* maximum backwards seek, in KiB */ 24static const int cfq_back_max = 16 * 1024; 25/* penalty of a backwards seek */ 26static const int cfq_back_penalty = 2; 27static const int cfq_slice_sync = HZ / 10; 28static int cfq_slice_async = HZ / 25; 29static const int cfq_slice_async_rq = 2; 30static int cfq_slice_idle = HZ / 125; 31static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ 32static const int cfq_hist_divisor = 4; 33 34/* 35 * offset from end of service tree 36 */ 37#define CFQ_IDLE_DELAY (HZ / 5) 38 39/* 40 * below this threshold, we consider thinktime immediate 41 */ 42#define CFQ_MIN_TT (2) 43 44/* 45 * Allow merged cfqqs to perform this amount of seeky I/O before 46 * deciding to break the queues up again. 47 */ 48#define CFQQ_COOP_TOUT (HZ) 49 50#define CFQ_SLICE_SCALE (5) 51#define CFQ_HW_QUEUE_MIN (5) 52 53#define RQ_CIC(rq) \ 54 ((struct cfq_io_context *) (rq)->elevator_private) 55#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2) 56 57static struct kmem_cache *cfq_pool; 58static struct kmem_cache *cfq_ioc_pool; 59 60static DEFINE_PER_CPU(unsigned long, cfq_ioc_count); 61static struct completion *ioc_gone; 62static DEFINE_SPINLOCK(ioc_gone_lock); 63 64#define CFQ_PRIO_LISTS IOPRIO_BE_NR 65#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) 66#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) 67 68#define sample_valid(samples) ((samples) > 80) 69 70/* 71 * Most of our rbtree usage is for sorting with min extraction, so 72 * if we cache the leftmost node we don't have to walk down the tree 73 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should 74 * move this into the elevator for the rq sorting as well. 
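 *
 * Note: cfq_rb_first() below returns the cached leftmost node when it is
 * still valid and only falls back to rb_first() after cfq_rb_erase() has
 * cleared the cache, so picking the next queue is normally O(1).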
75 */ 76struct cfq_rb_root { 77 struct rb_root rb; 78 struct rb_node *left; 79 unsigned count; 80}; 81#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, 0, } 82 83/* 84 * Per process-grouping structure 85 */ 86struct cfq_queue { 87 /* reference count */ 88 atomic_t ref; 89 /* various state flags, see below */ 90 unsigned int flags; 91 /* parent cfq_data */ 92 struct cfq_data *cfqd; 93 /* service_tree member */ 94 struct rb_node rb_node; 95 /* service_tree key */ 96 unsigned long rb_key; 97 /* prio tree member */ 98 struct rb_node p_node; 99 /* prio tree root we belong to, if any */ 100 struct rb_root *p_root; 101 /* sorted list of pending requests */ 102 struct rb_root sort_list; 103 /* if fifo isn't expired, next request to serve */ 104 struct request *next_rq; 105 /* requests queued in sort_list */ 106 int queued[2]; 107 /* currently allocated requests */ 108 int allocated[2]; 109 /* fifo list of requests in sort_list */ 110 struct list_head fifo; 111 112 unsigned long slice_end; 113 long slice_resid; 114 unsigned int slice_dispatch; 115 116 /* pending metadata requests */ 117 int meta_pending; 118 /* number of requests that are on the dispatch list or inside driver */ 119 int dispatched; 120 121 /* io prio of this group */ 122 unsigned short ioprio, org_ioprio; 123 unsigned short ioprio_class, org_ioprio_class; 124 125 unsigned int seek_samples; 126 u64 seek_total; 127 sector_t seek_mean; 128 sector_t last_request_pos; 129 unsigned long seeky_start; 130 131 pid_t pid; 132 133 struct cfq_rb_root *service_tree; 134 struct cfq_queue *new_cfqq; 135}; 136 137/* 138 * First index in the service_trees. 139 * IDLE is handled separately, so it has negative index 140 */ 141enum wl_prio_t { 142 IDLE_WORKLOAD = -1, 143 BE_WORKLOAD = 0, 144 RT_WORKLOAD = 1 145}; 146 147/* 148 * Second index in the service_trees. 149 */ 150enum wl_type_t { 151 ASYNC_WORKLOAD = 0, 152 SYNC_NOIDLE_WORKLOAD = 1, 153 SYNC_WORKLOAD = 2 154}; 155 156 157/* 158 * Per block device queue structure 159 */ 160struct cfq_data { 161 struct request_queue *queue; 162 163 /* 164 * rr lists of queues with requests, onle rr for each priority class. 165 * Counts are embedded in the cfq_rb_root 166 */ 167 struct cfq_rb_root service_trees[2][3]; 168 struct cfq_rb_root service_tree_idle; 169 /* 170 * The priority currently being served 171 */ 172 enum wl_prio_t serving_prio; 173 enum wl_type_t serving_type; 174 unsigned long workload_expires; 175 176 /* 177 * Each priority tree is sorted by next_request position. These 178 * trees are used when determining if two or more queues are 179 * interleaving requests (see cfq_close_cooperator). 
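 *
 * Note: each tree is keyed on blk_rq_pos() of a queue's next_rq (see
 * cfq_prio_tree_lookup() and cfq_prio_tree_add() below), which lets
 * cfqq_close() find a queue whose next request starts near
 * cfqd->last_position.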
180 */ 181 struct rb_root prio_trees[CFQ_PRIO_LISTS]; 182 183 unsigned int busy_queues; 184 unsigned int busy_queues_avg[2]; 185 186 int rq_in_driver[2]; 187 int sync_flight; 188 189 /* 190 * queue-depth detection 191 */ 192 int rq_queued; 193 int hw_tag; 194 /* 195 * hw_tag can be 196 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection) 197 * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth) 198 * 0 => no NCQ 199 */ 200 int hw_tag_est_depth; 201 unsigned int hw_tag_samples; 202 203 /* 204 * idle window management 205 */ 206 struct timer_list idle_slice_timer; 207 struct work_struct unplug_work; 208 209 struct cfq_queue *active_queue; 210 struct cfq_io_context *active_cic; 211 212 /* 213 * async queue for each priority case 214 */ 215 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; 216 struct cfq_queue *async_idle_cfqq; 217 218 sector_t last_position; 219 220 /* 221 * tunables, see top of file 222 */ 223 unsigned int cfq_quantum; 224 unsigned int cfq_fifo_expire[2]; 225 unsigned int cfq_back_penalty; 226 unsigned int cfq_back_max; 227 unsigned int cfq_slice[2]; 228 unsigned int cfq_slice_async_rq; 229 unsigned int cfq_slice_idle; 230 unsigned int cfq_latency; 231 232 struct list_head cic_list; 233 234 /* 235 * Fallback dummy cfqq for extreme OOM conditions 236 */ 237 struct cfq_queue oom_cfqq; 238 239 unsigned long last_end_sync_rq; 240}; 241 242static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio, 243 enum wl_type_t type, 244 struct cfq_data *cfqd) 245{ 246 if (prio == IDLE_WORKLOAD) 247 return &cfqd->service_tree_idle; 248 249 return &cfqd->service_trees[prio][type]; 250} 251 252enum cfqq_state_flags { 253 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ 254 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ 255 CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */ 256 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ 257 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ 258 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */ 259 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ 260 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ 261 CFQ_CFQQ_FLAG_sync, /* synchronous queue */ 262 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ 263 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ 264}; 265 266#define CFQ_CFQQ_FNS(name) \ 267static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ 268{ \ 269 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ 270} \ 271static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ 272{ \ 273 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ 274} \ 275static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ 276{ \ 277 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ 278} 279 280CFQ_CFQQ_FNS(on_rr); 281CFQ_CFQQ_FNS(wait_request); 282CFQ_CFQQ_FNS(must_dispatch); 283CFQ_CFQQ_FNS(must_alloc_slice); 284CFQ_CFQQ_FNS(fifo_expire); 285CFQ_CFQQ_FNS(idle_window); 286CFQ_CFQQ_FNS(prio_changed); 287CFQ_CFQQ_FNS(slice_new); 288CFQ_CFQQ_FNS(sync); 289CFQ_CFQQ_FNS(coop); 290CFQ_CFQQ_FNS(deep); 291#undef CFQ_CFQQ_FNS 292 293#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ 294 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) 295#define cfq_log(cfqd, fmt, args...) 
\ 296 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) 297 298static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) 299{ 300 if (cfq_class_idle(cfqq)) 301 return IDLE_WORKLOAD; 302 if (cfq_class_rt(cfqq)) 303 return RT_WORKLOAD; 304 return BE_WORKLOAD; 305} 306 307 308static enum wl_type_t cfqq_type(struct cfq_queue *cfqq) 309{ 310 if (!cfq_cfqq_sync(cfqq)) 311 return ASYNC_WORKLOAD; 312 if (!cfq_cfqq_idle_window(cfqq)) 313 return SYNC_NOIDLE_WORKLOAD; 314 return SYNC_WORKLOAD; 315} 316 317static inline int cfq_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd) 318{ 319 if (wl == IDLE_WORKLOAD) 320 return cfqd->service_tree_idle.count; 321 322 return cfqd->service_trees[wl][ASYNC_WORKLOAD].count 323 + cfqd->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count 324 + cfqd->service_trees[wl][SYNC_WORKLOAD].count; 325} 326 327static void cfq_dispatch_insert(struct request_queue *, struct request *); 328static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, 329 struct io_context *, gfp_t); 330static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, 331 struct io_context *); 332 333static inline int rq_in_driver(struct cfq_data *cfqd) 334{ 335 return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1]; 336} 337 338static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, 339 bool is_sync) 340{ 341 return cic->cfqq[is_sync]; 342} 343 344static inline void cic_set_cfqq(struct cfq_io_context *cic, 345 struct cfq_queue *cfqq, bool is_sync) 346{ 347 cic->cfqq[is_sync] = cfqq; 348} 349 350/* 351 * We regard a request as SYNC, if it's either a read or has the SYNC bit 352 * set (in which case it could also be direct WRITE). 353 */ 354static inline bool cfq_bio_sync(struct bio *bio) 355{ 356 return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO); 357} 358 359/* 360 * scheduler run of queue, if there are requests pending and no one in the 361 * driver that will restart queueing 362 */ 363static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) 364{ 365 if (cfqd->busy_queues) { 366 cfq_log(cfqd, "schedule dispatch"); 367 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); 368 } 369} 370 371static int cfq_queue_empty(struct request_queue *q) 372{ 373 struct cfq_data *cfqd = q->elevator->elevator_data; 374 375 return !cfqd->busy_queues; 376} 377 378/* 379 * Scale schedule slice based on io priority. Use the sync time slice only 380 * if a queue is marked sync and has sync io queued. A sync queue with async 381 * io only, should not get full sync slice length. 382 */ 383static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, 384 unsigned short prio) 385{ 386 const int base_slice = cfqd->cfq_slice[sync]; 387 388 WARN_ON(prio >= IOPRIO_BE_NR); 389 390 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio)); 391} 392 393static inline int 394cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 395{ 396 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); 397} 398 399/* 400 * get averaged number of queues of RT/BE priority. 
401 * average is updated, with a formula that gives more weight to higher numbers, 402 * to quickly follows sudden increases and decrease slowly 403 */ 404 405static inline unsigned cfq_get_avg_queues(struct cfq_data *cfqd, bool rt) 406{ 407 unsigned min_q, max_q; 408 unsigned mult = cfq_hist_divisor - 1; 409 unsigned round = cfq_hist_divisor / 2; 410 unsigned busy = cfq_busy_queues_wl(rt, cfqd); 411 412 min_q = min(cfqd->busy_queues_avg[rt], busy); 413 max_q = max(cfqd->busy_queues_avg[rt], busy); 414 cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) / 415 cfq_hist_divisor; 416 return cfqd->busy_queues_avg[rt]; 417} 418 419static inline void 420cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 421{ 422 unsigned slice = cfq_prio_to_slice(cfqd, cfqq); 423 if (cfqd->cfq_latency) { 424 /* interested queues (we consider only the ones with the same 425 * priority class) */ 426 unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq)); 427 unsigned sync_slice = cfqd->cfq_slice[1]; 428 unsigned expect_latency = sync_slice * iq; 429 if (expect_latency > cfq_target_latency) { 430 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle; 431 /* scale low_slice according to IO priority 432 * and sync vs async */ 433 unsigned low_slice = 434 min(slice, base_low_slice * slice / sync_slice); 435 /* the adapted slice value is scaled to fit all iqs 436 * into the target latency */ 437 slice = max(slice * cfq_target_latency / expect_latency, 438 low_slice); 439 } 440 } 441 cfqq->slice_end = jiffies + slice; 442 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); 443} 444 445/* 446 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end 447 * isn't valid until the first request from the dispatch is activated 448 * and the slice time set. 449 */ 450static inline bool cfq_slice_used(struct cfq_queue *cfqq) 451{ 452 if (cfq_cfqq_slice_new(cfqq)) 453 return 0; 454 if (time_before(jiffies, cfqq->slice_end)) 455 return 0; 456 457 return 1; 458} 459 460/* 461 * Lifted from AS - choose which of rq1 and rq2 that is best served now. 462 * We choose the request that is closest to the head right now. Distance 463 * behind the head is penalized and only allowed to a certain extent. 464 */ 465static struct request * 466cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last) 467{ 468 sector_t s1, s2, d1 = 0, d2 = 0; 469 unsigned long back_max; 470#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ 471#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ 472 unsigned wrap = 0; /* bit mask: requests behind the disk head? */ 473 474 if (rq1 == NULL || rq1 == rq2) 475 return rq2; 476 if (rq2 == NULL) 477 return rq1; 478 479 if (rq_is_sync(rq1) && !rq_is_sync(rq2)) 480 return rq1; 481 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 482 return rq2; 483 if (rq_is_meta(rq1) && !rq_is_meta(rq2)) 484 return rq1; 485 else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 486 return rq2; 487 488 s1 = blk_rq_pos(rq1); 489 s2 = blk_rq_pos(rq2); 490 491 /* 492 * by definition, 1KiB is 2 sectors 493 */ 494 back_max = cfqd->cfq_back_max * 2; 495 496 /* 497 * Strict one way elevator _except_ in the case where we allow 498 * short backward seeks which are biased as twice the cost of a 499 * similar forward seek. 
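 *
 * Worked example, using the defaults above (cfq_back_max = 16384 KiB =
 * 32768 sectors, cfq_back_penalty = 2): a request 1000 sectors behind
 * the head gets an effective distance of 2000 sectors, while a request
 * more than 32768 sectors behind is flagged as wrapped and is only
 * chosen if the competing request is missing or wrapped as well.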
500 */ 501 if (s1 >= last) 502 d1 = s1 - last; 503 else if (s1 + back_max >= last) 504 d1 = (last - s1) * cfqd->cfq_back_penalty; 505 else 506 wrap |= CFQ_RQ1_WRAP; 507 508 if (s2 >= last) 509 d2 = s2 - last; 510 else if (s2 + back_max >= last) 511 d2 = (last - s2) * cfqd->cfq_back_penalty; 512 else 513 wrap |= CFQ_RQ2_WRAP; 514 515 /* Found required data */ 516 517 /* 518 * By doing switch() on the bit mask "wrap" we avoid having to 519 * check two variables for all permutations: --> faster! 520 */ 521 switch (wrap) { 522 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ 523 if (d1 < d2) 524 return rq1; 525 else if (d2 < d1) 526 return rq2; 527 else { 528 if (s1 >= s2) 529 return rq1; 530 else 531 return rq2; 532 } 533 534 case CFQ_RQ2_WRAP: 535 return rq1; 536 case CFQ_RQ1_WRAP: 537 return rq2; 538 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */ 539 default: 540 /* 541 * Since both rqs are wrapped, 542 * start with the one that's further behind head 543 * (--> only *one* back seek required), 544 * since back seek takes more time than forward. 545 */ 546 if (s1 <= s2) 547 return rq1; 548 else 549 return rq2; 550 } 551} 552 553/* 554 * The below is leftmost cache rbtree addon 555 */ 556static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) 557{ 558 if (!root->left) 559 root->left = rb_first(&root->rb); 560 561 if (root->left) 562 return rb_entry(root->left, struct cfq_queue, rb_node); 563 564 return NULL; 565} 566 567static void rb_erase_init(struct rb_node *n, struct rb_root *root) 568{ 569 rb_erase(n, root); 570 RB_CLEAR_NODE(n); 571} 572 573static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) 574{ 575 if (root->left == n) 576 root->left = NULL; 577 rb_erase_init(n, &root->rb); 578 --root->count; 579} 580 581/* 582 * would be nice to take fifo expire time into account as well 583 */ 584static struct request * 585cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 586 struct request *last) 587{ 588 struct rb_node *rbnext = rb_next(&last->rb_node); 589 struct rb_node *rbprev = rb_prev(&last->rb_node); 590 struct request *next = NULL, *prev = NULL; 591 592 BUG_ON(RB_EMPTY_NODE(&last->rb_node)); 593 594 if (rbprev) 595 prev = rb_entry_rq(rbprev); 596 597 if (rbnext) 598 next = rb_entry_rq(rbnext); 599 else { 600 rbnext = rb_first(&cfqq->sort_list); 601 if (rbnext && rbnext != &last->rb_node) 602 next = rb_entry_rq(rbnext); 603 } 604 605 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); 606} 607 608static unsigned long cfq_slice_offset(struct cfq_data *cfqd, 609 struct cfq_queue *cfqq) 610{ 611 struct cfq_rb_root *service_tree; 612 613 service_tree = service_tree_for(cfqq_prio(cfqq), cfqq_type(cfqq), cfqd); 614 615 /* 616 * just an approximation, should be ok. 617 */ 618 return service_tree->count * (cfq_prio_slice(cfqd, 1, 0) - 619 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); 620} 621 622/* 623 * The cfqd->service_trees holds all pending cfq_queue's that have 624 * requests waiting to be processed. It is sorted in the order that 625 * we will service the queues. 
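 *
 * Note: the sort key (rb_key) is essentially jiffies plus an offset that
 * scales with the number of queues already on the tree, weighted by the
 * queue's priority (see cfq_slice_offset()), minus any residual slice
 * carried over from the previous round, so queues that overran their
 * slice are positioned further back.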
626 */ 627static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, 628 bool add_front) 629{ 630 struct rb_node **p, *parent; 631 struct cfq_queue *__cfqq; 632 unsigned long rb_key; 633 struct cfq_rb_root *service_tree; 634 int left; 635 636 service_tree = service_tree_for(cfqq_prio(cfqq), cfqq_type(cfqq), cfqd); 637 if (cfq_class_idle(cfqq)) { 638 rb_key = CFQ_IDLE_DELAY; 639 parent = rb_last(&service_tree->rb); 640 if (parent && parent != &cfqq->rb_node) { 641 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 642 rb_key += __cfqq->rb_key; 643 } else 644 rb_key += jiffies; 645 } else if (!add_front) { 646 /* 647 * Get our rb key offset. Subtract any residual slice 648 * value carried from last service. A negative resid 649 * count indicates slice overrun, and this should position 650 * the next service time further away in the tree. 651 */ 652 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; 653 rb_key -= cfqq->slice_resid; 654 cfqq->slice_resid = 0; 655 } else { 656 rb_key = -HZ; 657 __cfqq = cfq_rb_first(service_tree); 658 rb_key += __cfqq ? __cfqq->rb_key : jiffies; 659 } 660 661 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 662 /* 663 * same position, nothing more to do 664 */ 665 if (rb_key == cfqq->rb_key && 666 cfqq->service_tree == service_tree) 667 return; 668 669 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); 670 cfqq->service_tree = NULL; 671 } 672 673 left = 1; 674 parent = NULL; 675 cfqq->service_tree = service_tree; 676 p = &service_tree->rb.rb_node; 677 while (*p) { 678 struct rb_node **n; 679 680 parent = *p; 681 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 682 683 /* 684 * sort by key, that represents service time. 685 */ 686 if (time_before(rb_key, __cfqq->rb_key)) 687 n = &(*p)->rb_left; 688 else { 689 n = &(*p)->rb_right; 690 left = 0; 691 } 692 693 p = n; 694 } 695 696 if (left) 697 service_tree->left = &cfqq->rb_node; 698 699 cfqq->rb_key = rb_key; 700 rb_link_node(&cfqq->rb_node, parent, p); 701 rb_insert_color(&cfqq->rb_node, &service_tree->rb); 702 service_tree->count++; 703} 704 705static struct cfq_queue * 706cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, 707 sector_t sector, struct rb_node **ret_parent, 708 struct rb_node ***rb_link) 709{ 710 struct rb_node **p, *parent; 711 struct cfq_queue *cfqq = NULL; 712 713 parent = NULL; 714 p = &root->rb_node; 715 while (*p) { 716 struct rb_node **n; 717 718 parent = *p; 719 cfqq = rb_entry(parent, struct cfq_queue, p_node); 720 721 /* 722 * Sort strictly based on sector. Smallest to the left, 723 * largest to the right. 
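 *
 * Note: an exact sector match breaks out of the loop and returns the
 * matching cfqq; otherwise NULL is returned and *ret_parent / *rb_link
 * describe the insertion point for the caller.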
724 */ 725 if (sector > blk_rq_pos(cfqq->next_rq)) 726 n = &(*p)->rb_right; 727 else if (sector < blk_rq_pos(cfqq->next_rq)) 728 n = &(*p)->rb_left; 729 else 730 break; 731 p = n; 732 cfqq = NULL; 733 } 734 735 *ret_parent = parent; 736 if (rb_link) 737 *rb_link = p; 738 return cfqq; 739} 740 741static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) 742{ 743 struct rb_node **p, *parent; 744 struct cfq_queue *__cfqq; 745 746 if (cfqq->p_root) { 747 rb_erase(&cfqq->p_node, cfqq->p_root); 748 cfqq->p_root = NULL; 749 } 750 751 if (cfq_class_idle(cfqq)) 752 return; 753 if (!cfqq->next_rq) 754 return; 755 756 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; 757 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, 758 blk_rq_pos(cfqq->next_rq), &parent, &p); 759 if (!__cfqq) { 760 rb_link_node(&cfqq->p_node, parent, p); 761 rb_insert_color(&cfqq->p_node, cfqq->p_root); 762 } else 763 cfqq->p_root = NULL; 764} 765 766/* 767 * Update cfqq's position in the service tree. 768 */ 769static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) 770{ 771 /* 772 * Resorting requires the cfqq to be on the RR list already. 773 */ 774 if (cfq_cfqq_on_rr(cfqq)) { 775 cfq_service_tree_add(cfqd, cfqq, 0); 776 cfq_prio_tree_add(cfqd, cfqq); 777 } 778} 779 780/* 781 * add to busy list of queues for service, trying to be fair in ordering 782 * the pending list according to last request service 783 */ 784static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 785{ 786 cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); 787 BUG_ON(cfq_cfqq_on_rr(cfqq)); 788 cfq_mark_cfqq_on_rr(cfqq); 789 cfqd->busy_queues++; 790 791 cfq_resort_rr_list(cfqd, cfqq); 792} 793 794/* 795 * Called when the cfqq no longer has requests pending, remove it from 796 * the service tree. 797 */ 798static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 799{ 800 cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); 801 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 802 cfq_clear_cfqq_on_rr(cfqq); 803 804 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 805 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); 806 cfqq->service_tree = NULL; 807 } 808 if (cfqq->p_root) { 809 rb_erase(&cfqq->p_node, cfqq->p_root); 810 cfqq->p_root = NULL; 811 } 812 813 BUG_ON(!cfqd->busy_queues); 814 cfqd->busy_queues--; 815} 816 817/* 818 * rb tree support functions 819 */ 820static void cfq_del_rq_rb(struct request *rq) 821{ 822 struct cfq_queue *cfqq = RQ_CFQQ(rq); 823 struct cfq_data *cfqd = cfqq->cfqd; 824 const int sync = rq_is_sync(rq); 825 826 BUG_ON(!cfqq->queued[sync]); 827 cfqq->queued[sync]--; 828 829 elv_rb_del(&cfqq->sort_list, rq); 830 831 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) 832 cfq_del_cfqq_rr(cfqd, cfqq); 833} 834 835static void cfq_add_rq_rb(struct request *rq) 836{ 837 struct cfq_queue *cfqq = RQ_CFQQ(rq); 838 struct cfq_data *cfqd = cfqq->cfqd; 839 struct request *__alias, *prev; 840 841 cfqq->queued[rq_is_sync(rq)]++; 842 843 /* 844 * looks a little odd, but the first insert might return an alias. 
845 * if that happens, put the alias on the dispatch list 846 */ 847 while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL) 848 cfq_dispatch_insert(cfqd->queue, __alias); 849 850 if (!cfq_cfqq_on_rr(cfqq)) 851 cfq_add_cfqq_rr(cfqd, cfqq); 852 853 /* 854 * check if this request is a better next-serve candidate 855 */ 856 prev = cfqq->next_rq; 857 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); 858 859 /* 860 * adjust priority tree position, if ->next_rq changes 861 */ 862 if (prev != cfqq->next_rq) 863 cfq_prio_tree_add(cfqd, cfqq); 864 865 BUG_ON(!cfqq->next_rq); 866} 867 868static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) 869{ 870 elv_rb_del(&cfqq->sort_list, rq); 871 cfqq->queued[rq_is_sync(rq)]--; 872 cfq_add_rq_rb(rq); 873} 874 875static struct request * 876cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) 877{ 878 struct task_struct *tsk = current; 879 struct cfq_io_context *cic; 880 struct cfq_queue *cfqq; 881 882 cic = cfq_cic_lookup(cfqd, tsk->io_context); 883 if (!cic) 884 return NULL; 885 886 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 887 if (cfqq) { 888 sector_t sector = bio->bi_sector + bio_sectors(bio); 889 890 return elv_rb_find(&cfqq->sort_list, sector); 891 } 892 893 return NULL; 894} 895 896static void cfq_activate_request(struct request_queue *q, struct request *rq) 897{ 898 struct cfq_data *cfqd = q->elevator->elevator_data; 899 900 cfqd->rq_in_driver[rq_is_sync(rq)]++; 901 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 902 rq_in_driver(cfqd)); 903 904 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); 905} 906 907static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 908{ 909 struct cfq_data *cfqd = q->elevator->elevator_data; 910 const int sync = rq_is_sync(rq); 911 912 WARN_ON(!cfqd->rq_in_driver[sync]); 913 cfqd->rq_in_driver[sync]--; 914 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", 915 rq_in_driver(cfqd)); 916} 917 918static void cfq_remove_request(struct request *rq) 919{ 920 struct cfq_queue *cfqq = RQ_CFQQ(rq); 921 922 if (cfqq->next_rq == rq) 923 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); 924 925 list_del_init(&rq->queuelist); 926 cfq_del_rq_rb(rq); 927 928 cfqq->cfqd->rq_queued--; 929 if (rq_is_meta(rq)) { 930 WARN_ON(!cfqq->meta_pending); 931 cfqq->meta_pending--; 932 } 933} 934 935static int cfq_merge(struct request_queue *q, struct request **req, 936 struct bio *bio) 937{ 938 struct cfq_data *cfqd = q->elevator->elevator_data; 939 struct request *__rq; 940 941 __rq = cfq_find_rq_fmerge(cfqd, bio); 942 if (__rq && elv_rq_merge_ok(__rq, bio)) { 943 *req = __rq; 944 return ELEVATOR_FRONT_MERGE; 945 } 946 947 return ELEVATOR_NO_MERGE; 948} 949 950static void cfq_merged_request(struct request_queue *q, struct request *req, 951 int type) 952{ 953 if (type == ELEVATOR_FRONT_MERGE) { 954 struct cfq_queue *cfqq = RQ_CFQQ(req); 955 956 cfq_reposition_rq_rb(cfqq, req); 957 } 958} 959 960static void 961cfq_merged_requests(struct request_queue *q, struct request *rq, 962 struct request *next) 963{ 964 struct cfq_queue *cfqq = RQ_CFQQ(rq); 965 /* 966 * reposition in fifo if next is older than rq 967 */ 968 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 969 time_before(rq_fifo_time(next), rq_fifo_time(rq))) { 970 list_move(&rq->queuelist, &next->queuelist); 971 rq_set_fifo_time(rq, rq_fifo_time(next)); 972 } 973 974 if (cfqq->next_rq == next) 975 cfqq->next_rq = rq; 976 cfq_remove_request(next); 977} 978 979static 
int cfq_allow_merge(struct request_queue *q, struct request *rq, 980 struct bio *bio) 981{ 982 struct cfq_data *cfqd = q->elevator->elevator_data; 983 struct cfq_io_context *cic; 984 struct cfq_queue *cfqq; 985 986 /* 987 * Disallow merge of a sync bio into an async request. 988 */ 989 if (cfq_bio_sync(bio) && !rq_is_sync(rq)) 990 return false; 991 992 /* 993 * Lookup the cfqq that this bio will be queued with. Allow 994 * merge only if rq is queued there. 995 */ 996 cic = cfq_cic_lookup(cfqd, current->io_context); 997 if (!cic) 998 return false; 999 1000 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 1001 return cfqq == RQ_CFQQ(rq); 1002} 1003 1004static void __cfq_set_active_queue(struct cfq_data *cfqd, 1005 struct cfq_queue *cfqq) 1006{ 1007 if (cfqq) { 1008 cfq_log_cfqq(cfqd, cfqq, "set_active"); 1009 cfqq->slice_end = 0; 1010 cfqq->slice_dispatch = 0; 1011 1012 cfq_clear_cfqq_wait_request(cfqq); 1013 cfq_clear_cfqq_must_dispatch(cfqq); 1014 cfq_clear_cfqq_must_alloc_slice(cfqq); 1015 cfq_clear_cfqq_fifo_expire(cfqq); 1016 cfq_mark_cfqq_slice_new(cfqq); 1017 1018 del_timer(&cfqd->idle_slice_timer); 1019 } 1020 1021 cfqd->active_queue = cfqq; 1022} 1023 1024/* 1025 * current cfqq expired its slice (or was too idle), select new one 1026 */ 1027static void 1028__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1029 bool timed_out) 1030{ 1031 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); 1032 1033 if (cfq_cfqq_wait_request(cfqq)) 1034 del_timer(&cfqd->idle_slice_timer); 1035 1036 cfq_clear_cfqq_wait_request(cfqq); 1037 1038 /* 1039 * store what was left of this slice, if the queue idled/timed out 1040 */ 1041 if (timed_out && !cfq_cfqq_slice_new(cfqq)) { 1042 cfqq->slice_resid = cfqq->slice_end - jiffies; 1043 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); 1044 } 1045 1046 cfq_resort_rr_list(cfqd, cfqq); 1047 1048 if (cfqq == cfqd->active_queue) 1049 cfqd->active_queue = NULL; 1050 1051 if (cfqd->active_cic) { 1052 put_io_context(cfqd->active_cic->ioc); 1053 cfqd->active_cic = NULL; 1054 } 1055} 1056 1057static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) 1058{ 1059 struct cfq_queue *cfqq = cfqd->active_queue; 1060 1061 if (cfqq) 1062 __cfq_slice_expired(cfqd, cfqq, timed_out); 1063} 1064 1065/* 1066 * Get next queue for service. Unless we have a queue preemption, 1067 * we'll simply select the first cfqq in the service tree. 1068 */ 1069static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) 1070{ 1071 struct cfq_rb_root *service_tree = 1072 service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd); 1073 1074 if (RB_EMPTY_ROOT(&service_tree->rb)) 1075 return NULL; 1076 return cfq_rb_first(service_tree); 1077} 1078 1079/* 1080 * Get and set a new active queue for service. 
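 *
 * Note: a non-NULL cfqq here is a close cooperator chosen by
 * cfq_select_queue(); otherwise the first queue of the currently serving
 * priority/type tree is picked via cfq_get_next_queue().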
1081 */ 1082static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, 1083 struct cfq_queue *cfqq) 1084{ 1085 if (!cfqq) 1086 cfqq = cfq_get_next_queue(cfqd); 1087 1088 __cfq_set_active_queue(cfqd, cfqq); 1089 return cfqq; 1090} 1091 1092static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, 1093 struct request *rq) 1094{ 1095 if (blk_rq_pos(rq) >= cfqd->last_position) 1096 return blk_rq_pos(rq) - cfqd->last_position; 1097 else 1098 return cfqd->last_position - blk_rq_pos(rq); 1099} 1100 1101#define CFQQ_SEEK_THR 8 * 1024 1102#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR) 1103 1104static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1105 struct request *rq) 1106{ 1107 sector_t sdist = cfqq->seek_mean; 1108 1109 if (!sample_valid(cfqq->seek_samples)) 1110 sdist = CFQQ_SEEK_THR; 1111 1112 return cfq_dist_from_last(cfqd, rq) <= sdist; 1113} 1114 1115static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, 1116 struct cfq_queue *cur_cfqq) 1117{ 1118 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; 1119 struct rb_node *parent, *node; 1120 struct cfq_queue *__cfqq; 1121 sector_t sector = cfqd->last_position; 1122 1123 if (RB_EMPTY_ROOT(root)) 1124 return NULL; 1125 1126 /* 1127 * First, if we find a request starting at the end of the last 1128 * request, choose it. 1129 */ 1130 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); 1131 if (__cfqq) 1132 return __cfqq; 1133 1134 /* 1135 * If the exact sector wasn't found, the parent of the NULL leaf 1136 * will contain the closest sector. 1137 */ 1138 __cfqq = rb_entry(parent, struct cfq_queue, p_node); 1139 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) 1140 return __cfqq; 1141 1142 if (blk_rq_pos(__cfqq->next_rq) < sector) 1143 node = rb_next(&__cfqq->p_node); 1144 else 1145 node = rb_prev(&__cfqq->p_node); 1146 if (!node) 1147 return NULL; 1148 1149 __cfqq = rb_entry(node, struct cfq_queue, p_node); 1150 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) 1151 return __cfqq; 1152 1153 return NULL; 1154} 1155 1156/* 1157 * cfqd - obvious 1158 * cur_cfqq - passed in so that we don't decide that the current queue is 1159 * closely cooperating with itself. 1160 * 1161 * So, basically we're assuming that that cur_cfqq has dispatched at least 1162 * one request, and that cfqd->last_position reflects a position on the disk 1163 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid 1164 * assumption. 1165 */ 1166static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, 1167 struct cfq_queue *cur_cfqq) 1168{ 1169 struct cfq_queue *cfqq; 1170 1171 if (!cfq_cfqq_sync(cur_cfqq)) 1172 return NULL; 1173 if (CFQQ_SEEKY(cur_cfqq)) 1174 return NULL; 1175 1176 /* 1177 * We should notice if some of the queues are cooperating, eg 1178 * working closely on the same area of the disk. In that case, 1179 * we can group them together and don't waste time idling. 1180 */ 1181 cfqq = cfqq_close(cfqd, cur_cfqq); 1182 if (!cfqq) 1183 return NULL; 1184 1185 /* 1186 * It only makes sense to merge sync queues. 1187 */ 1188 if (!cfq_cfqq_sync(cfqq)) 1189 return NULL; 1190 if (CFQQ_SEEKY(cfqq)) 1191 return NULL; 1192 1193 /* 1194 * Do not merge queues of different priority classes 1195 */ 1196 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq)) 1197 return NULL; 1198 1199 return cfqq; 1200} 1201 1202/* 1203 * Determine whether we should enforce idle window for this queue. 
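 *
 * Note: idle-class queues never idle, queues marked with the idle_window
 * flag always do, and everything else idles only if no other queue is
 * left on its service tree.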
1204 */ 1205 1206static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1207{ 1208 enum wl_prio_t prio = cfqq_prio(cfqq); 1209 struct cfq_rb_root *service_tree = cfqq->service_tree; 1210 1211 /* We never do for idle class queues. */ 1212 if (prio == IDLE_WORKLOAD) 1213 return false; 1214 1215 /* We do for queues that were marked with idle window flag. */ 1216 if (cfq_cfqq_idle_window(cfqq)) 1217 return true; 1218 1219 /* 1220 * Otherwise, we do only if they are the last ones 1221 * in their service tree. 1222 */ 1223 if (!service_tree) 1224 service_tree = service_tree_for(prio, cfqq_type(cfqq), cfqd); 1225 1226 if (service_tree->count == 0) 1227 return true; 1228 1229 return (service_tree->count == 1 && cfq_rb_first(service_tree) == cfqq); 1230} 1231 1232static void cfq_arm_slice_timer(struct cfq_data *cfqd) 1233{ 1234 struct cfq_queue *cfqq = cfqd->active_queue; 1235 struct cfq_io_context *cic; 1236 unsigned long sl; 1237 1238 /* 1239 * SSD device without seek penalty, disable idling. But only do so 1240 * for devices that support queuing, otherwise we still have a problem 1241 * with sync vs async workloads. 1242 */ 1243 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) 1244 return; 1245 1246 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); 1247 WARN_ON(cfq_cfqq_slice_new(cfqq)); 1248 1249 /* 1250 * idle is disabled, either manually or by past process history 1251 */ 1252 if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq)) 1253 return; 1254 1255 /* 1256 * still requests with the driver, don't idle 1257 */ 1258 if (rq_in_driver(cfqd)) 1259 return; 1260 1261 /* 1262 * task has exited, don't wait 1263 */ 1264 cic = cfqd->active_cic; 1265 if (!cic || !atomic_read(&cic->ioc->nr_tasks)) 1266 return; 1267 1268 /* 1269 * If our average think time is larger than the remaining time 1270 * slice, then don't idle. This avoids overrunning the allotted 1271 * time slice. 1272 */ 1273 if (sample_valid(cic->ttime_samples) && 1274 (cfqq->slice_end - jiffies < cic->ttime_mean)) 1275 return; 1276 1277 cfq_mark_cfqq_wait_request(cfqq); 1278 1279 sl = cfqd->cfq_slice_idle; 1280 1281 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 1282 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl); 1283} 1284 1285/* 1286 * Move request from internal lists to the request queue dispatch list. 
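 *
 * Note: the moved request is counted in cfqq->dispatched and, for sync
 * queues, in cfqd->sync_flight; cfq_may_dispatch() uses the latter to
 * make async queues wait while sync IO is still in flight.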
1287 */ 1288static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) 1289{ 1290 struct cfq_data *cfqd = q->elevator->elevator_data; 1291 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1292 1293 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); 1294 1295 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); 1296 cfq_remove_request(rq); 1297 cfqq->dispatched++; 1298 elv_dispatch_sort(q, rq); 1299 1300 if (cfq_cfqq_sync(cfqq)) 1301 cfqd->sync_flight++; 1302} 1303 1304/* 1305 * return expired entry, or NULL to just start from scratch in rbtree 1306 */ 1307static struct request *cfq_check_fifo(struct cfq_queue *cfqq) 1308{ 1309 struct request *rq = NULL; 1310 1311 if (cfq_cfqq_fifo_expire(cfqq)) 1312 return NULL; 1313 1314 cfq_mark_cfqq_fifo_expire(cfqq); 1315 1316 if (list_empty(&cfqq->fifo)) 1317 return NULL; 1318 1319 rq = rq_entry_fifo(cfqq->fifo.next); 1320 if (time_before(jiffies, rq_fifo_time(rq))) 1321 rq = NULL; 1322 1323 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); 1324 return rq; 1325} 1326 1327static inline int 1328cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1329{ 1330 const int base_rq = cfqd->cfq_slice_async_rq; 1331 1332 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 1333 1334 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); 1335} 1336 1337/* 1338 * Must be called with the queue_lock held. 1339 */ 1340static int cfqq_process_refs(struct cfq_queue *cfqq) 1341{ 1342 int process_refs, io_refs; 1343 1344 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; 1345 process_refs = atomic_read(&cfqq->ref) - io_refs; 1346 BUG_ON(process_refs < 0); 1347 return process_refs; 1348} 1349 1350static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) 1351{ 1352 int process_refs, new_process_refs; 1353 struct cfq_queue *__cfqq; 1354 1355 /* Avoid a circular list and skip interim queue merges */ 1356 while ((__cfqq = new_cfqq->new_cfqq)) { 1357 if (__cfqq == cfqq) 1358 return; 1359 new_cfqq = __cfqq; 1360 } 1361 1362 process_refs = cfqq_process_refs(cfqq); 1363 /* 1364 * If the process for the cfqq has gone away, there is no 1365 * sense in merging the queues. 1366 */ 1367 if (process_refs == 0) 1368 return; 1369 1370 /* 1371 * Merge in the direction of the lesser amount of work. 
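 *
 * Note: the queue with fewer process references is the one redirected
 * (its ->new_cfqq is pointed at the busier queue), and the busier queue
 * gains that many extra references on behalf of the redirected
 * processes.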
1372 */ 1373 new_process_refs = cfqq_process_refs(new_cfqq); 1374 if (new_process_refs >= process_refs) { 1375 cfqq->new_cfqq = new_cfqq; 1376 atomic_add(process_refs, &new_cfqq->ref); 1377 } else { 1378 new_cfqq->new_cfqq = cfqq; 1379 atomic_add(new_process_refs, &cfqq->ref); 1380 } 1381} 1382 1383static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, enum wl_prio_t prio, 1384 bool prio_changed) 1385{ 1386 struct cfq_queue *queue; 1387 int i; 1388 bool key_valid = false; 1389 unsigned long lowest_key = 0; 1390 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; 1391 1392 if (prio_changed) { 1393 /* 1394 * When priorities switched, we prefer starting 1395 * from SYNC_NOIDLE (first choice), or just SYNC 1396 * over ASYNC 1397 */ 1398 if (service_tree_for(prio, cur_best, cfqd)->count) 1399 return cur_best; 1400 cur_best = SYNC_WORKLOAD; 1401 if (service_tree_for(prio, cur_best, cfqd)->count) 1402 return cur_best; 1403 1404 return ASYNC_WORKLOAD; 1405 } 1406 1407 for (i = 0; i < 3; ++i) { 1408 /* otherwise, select the one with lowest rb_key */ 1409 queue = cfq_rb_first(service_tree_for(prio, i, cfqd)); 1410 if (queue && 1411 (!key_valid || time_before(queue->rb_key, lowest_key))) { 1412 lowest_key = queue->rb_key; 1413 cur_best = i; 1414 key_valid = true; 1415 } 1416 } 1417 1418 return cur_best; 1419} 1420 1421static void choose_service_tree(struct cfq_data *cfqd) 1422{ 1423 enum wl_prio_t previous_prio = cfqd->serving_prio; 1424 bool prio_changed; 1425 unsigned slice; 1426 unsigned count; 1427 1428 /* Choose next priority. RT > BE > IDLE */ 1429 if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd)) 1430 cfqd->serving_prio = RT_WORKLOAD; 1431 else if (cfq_busy_queues_wl(BE_WORKLOAD, cfqd)) 1432 cfqd->serving_prio = BE_WORKLOAD; 1433 else { 1434 cfqd->serving_prio = IDLE_WORKLOAD; 1435 cfqd->workload_expires = jiffies + 1; 1436 return; 1437 } 1438 1439 /* 1440 * For RT and BE, we have to choose also the type 1441 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload 1442 * expiration time 1443 */ 1444 prio_changed = (cfqd->serving_prio != previous_prio); 1445 count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd) 1446 ->count; 1447 1448 /* 1449 * If priority didn't change, check workload expiration, 1450 * and that we still have other queues ready 1451 */ 1452 if (!prio_changed && count && 1453 !time_after(jiffies, cfqd->workload_expires)) 1454 return; 1455 1456 /* otherwise select new workload type */ 1457 cfqd->serving_type = 1458 cfq_choose_wl(cfqd, cfqd->serving_prio, prio_changed); 1459 count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd) 1460 ->count; 1461 1462 /* 1463 * the workload slice is computed as a fraction of target latency 1464 * proportional to the number of queues in that workload, over 1465 * all the queues in the same priority class 1466 */ 1467 slice = cfq_target_latency * count / 1468 max_t(unsigned, cfqd->busy_queues_avg[cfqd->serving_prio], 1469 cfq_busy_queues_wl(cfqd->serving_prio, cfqd)); 1470 1471 if (cfqd->serving_type == ASYNC_WORKLOAD) 1472 /* async workload slice is scaled down according to 1473 * the sync/async slice ratio. */ 1474 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; 1475 else 1476 /* sync workload slice is at least 2 * cfq_slice_idle */ 1477 slice = max(slice, 2 * cfqd->cfq_slice_idle); 1478 1479 slice = max_t(unsigned, slice, CFQ_MIN_TT); 1480 cfqd->workload_expires = jiffies + slice; 1481} 1482 1483/* 1484 * Select a queue for service. 
If we have a current active queue, 1485 * check whether to continue servicing it, or retrieve and set a new one. 1486 */ 1487static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) 1488{ 1489 struct cfq_queue *cfqq, *new_cfqq = NULL; 1490 1491 cfqq = cfqd->active_queue; 1492 if (!cfqq) 1493 goto new_queue; 1494 1495 /* 1496 * The active queue has run out of time, expire it and select new. 1497 */ 1498 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) 1499 goto expire; 1500 1501 /* 1502 * The active queue has requests and isn't expired, allow it to 1503 * dispatch. 1504 */ 1505 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 1506 goto keep_queue; 1507 1508 /* 1509 * If another queue has a request waiting within our mean seek 1510 * distance, let it run. The expire code will check for close 1511 * cooperators and put the close queue at the front of the service 1512 * tree. If possible, merge the expiring queue with the new cfqq. 1513 */ 1514 new_cfqq = cfq_close_cooperator(cfqd, cfqq); 1515 if (new_cfqq) { 1516 if (!cfqq->new_cfqq) 1517 cfq_setup_merge(cfqq, new_cfqq); 1518 goto expire; 1519 } 1520 1521 /* 1522 * No requests pending. If the active queue still has requests in 1523 * flight or is idling for a new request, allow either of these 1524 * conditions to happen (or time out) before selecting a new queue. 1525 */ 1526 if (timer_pending(&cfqd->idle_slice_timer) || 1527 (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) { 1528 cfqq = NULL; 1529 goto keep_queue; 1530 } 1531 1532expire: 1533 cfq_slice_expired(cfqd, 0); 1534new_queue: 1535 /* 1536 * Current queue expired. Check if we have to switch to a new 1537 * service tree 1538 */ 1539 if (!new_cfqq) 1540 choose_service_tree(cfqd); 1541 1542 cfqq = cfq_set_active_queue(cfqd, new_cfqq); 1543keep_queue: 1544 return cfqq; 1545} 1546 1547static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) 1548{ 1549 int dispatched = 0; 1550 1551 while (cfqq->next_rq) { 1552 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); 1553 dispatched++; 1554 } 1555 1556 BUG_ON(!list_empty(&cfqq->fifo)); 1557 return dispatched; 1558} 1559 1560/* 1561 * Drain our current requests. Used for barriers and when switching 1562 * io schedulers on-the-fly. 1563 */ 1564static int cfq_forced_dispatch(struct cfq_data *cfqd) 1565{ 1566 struct cfq_queue *cfqq; 1567 int dispatched = 0; 1568 int i, j; 1569 for (i = 0; i < 2; ++i) 1570 for (j = 0; j < 3; ++j) 1571 while ((cfqq = cfq_rb_first(&cfqd->service_trees[i][j])) 1572 != NULL) 1573 dispatched += __cfq_forced_dispatch_cfqq(cfqq); 1574 1575 while ((cfqq = cfq_rb_first(&cfqd->service_tree_idle)) != NULL) 1576 dispatched += __cfq_forced_dispatch_cfqq(cfqq); 1577 1578 cfq_slice_expired(cfqd, 0); 1579 1580 BUG_ON(cfqd->busy_queues); 1581 1582 cfq_log(cfqd, "forced_dispatch=%d", dispatched); 1583 return dispatched; 1584} 1585 1586static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1587{ 1588 unsigned int max_dispatch; 1589 1590 /* 1591 * Drain async requests before we start sync IO 1592 */ 1593 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) 1594 return false; 1595 1596 /* 1597 * If this is an async queue and we have sync IO in flight, let it wait 1598 */ 1599 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) 1600 return false; 1601 1602 max_dispatch = cfqd->cfq_quantum; 1603 if (cfq_class_idle(cfqq)) 1604 max_dispatch = 1; 1605 1606 /* 1607 * Does this cfqq already have too much IO in flight? 
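 *
 * Note: max_dispatch starts at cfq_quantum (default 4) and is capped at
 * 1 for idle-class queues; a sole busy queue may exceed it by a factor
 * of 4, and async queues can be throttled further below based on how
 * long ago the last sync request completed.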
1608 */ 1609 if (cfqq->dispatched >= max_dispatch) { 1610 /* 1611 * idle queue must always only have a single IO in flight 1612 */ 1613 if (cfq_class_idle(cfqq)) 1614 return false; 1615 1616 /* 1617 * We have other queues, don't allow more IO from this one 1618 */ 1619 if (cfqd->busy_queues > 1) 1620 return false; 1621 1622 /* 1623 * Sole queue user, allow bigger slice 1624 */ 1625 max_dispatch *= 4; 1626 } 1627 1628 /* 1629 * Async queues must wait a bit before being allowed dispatch. 1630 * We also ramp up the dispatch depth gradually for async IO, 1631 * based on the last sync IO we serviced 1632 */ 1633 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { 1634 unsigned long last_sync = jiffies - cfqd->last_end_sync_rq; 1635 unsigned int depth; 1636 1637 depth = last_sync / cfqd->cfq_slice[1]; 1638 if (!depth && !cfqq->dispatched) 1639 depth = 1; 1640 if (depth < max_dispatch) 1641 max_dispatch = depth; 1642 } 1643 1644 /* 1645 * If we're below the current max, allow a dispatch 1646 */ 1647 return cfqq->dispatched < max_dispatch; 1648} 1649 1650/* 1651 * Dispatch a request from cfqq, moving them to the request queue 1652 * dispatch list. 1653 */ 1654static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1655{ 1656 struct request *rq; 1657 1658 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); 1659 1660 if (!cfq_may_dispatch(cfqd, cfqq)) 1661 return false; 1662 1663 /* 1664 * follow expired path, else get first next available 1665 */ 1666 rq = cfq_check_fifo(cfqq); 1667 if (!rq) 1668 rq = cfqq->next_rq; 1669 1670 /* 1671 * insert request into driver dispatch list 1672 */ 1673 cfq_dispatch_insert(cfqd->queue, rq); 1674 1675 if (!cfqd->active_cic) { 1676 struct cfq_io_context *cic = RQ_CIC(rq); 1677 1678 atomic_long_inc(&cic->ioc->refcount); 1679 cfqd->active_cic = cic; 1680 } 1681 1682 return true; 1683} 1684 1685/* 1686 * Find the cfqq that we need to service and move a request from that to the 1687 * dispatch list 1688 */ 1689static int cfq_dispatch_requests(struct request_queue *q, int force) 1690{ 1691 struct cfq_data *cfqd = q->elevator->elevator_data; 1692 struct cfq_queue *cfqq; 1693 1694 if (!cfqd->busy_queues) 1695 return 0; 1696 1697 if (unlikely(force)) 1698 return cfq_forced_dispatch(cfqd); 1699 1700 cfqq = cfq_select_queue(cfqd); 1701 if (!cfqq) 1702 return 0; 1703 1704 /* 1705 * Dispatch a request from this cfqq, if it is allowed 1706 */ 1707 if (!cfq_dispatch_request(cfqd, cfqq)) 1708 return 0; 1709 1710 cfqq->slice_dispatch++; 1711 cfq_clear_cfqq_must_dispatch(cfqq); 1712 1713 /* 1714 * expire an async queue immediately if it has used up its slice. idle 1715 * queue always expire after 1 dispatch round. 1716 */ 1717 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && 1718 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || 1719 cfq_class_idle(cfqq))) { 1720 cfqq->slice_end = jiffies + 1; 1721 cfq_slice_expired(cfqd, 0); 1722 } 1723 1724 cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); 1725 return 1; 1726} 1727 1728/* 1729 * task holds one reference to the queue, dropped when task exits. each rq 1730 * in-flight on this queue also holds a reference, dropped when rq is freed. 1731 * 1732 * queue lock must be held here. 
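 *
 * Note: cfqq_process_refs() above relies on exactly this scheme, deriving
 * the number of process owners by subtracting the per-request references
 * (allocated[READ] + allocated[WRITE]) from the total refcount.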
1733 */ 1734static void cfq_put_queue(struct cfq_queue *cfqq) 1735{ 1736 struct cfq_data *cfqd = cfqq->cfqd; 1737 1738 BUG_ON(atomic_read(&cfqq->ref) <= 0); 1739 1740 if (!atomic_dec_and_test(&cfqq->ref)) 1741 return; 1742 1743 cfq_log_cfqq(cfqd, cfqq, "put_queue"); 1744 BUG_ON(rb_first(&cfqq->sort_list)); 1745 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); 1746 BUG_ON(cfq_cfqq_on_rr(cfqq)); 1747 1748 if (unlikely(cfqd->active_queue == cfqq)) { 1749 __cfq_slice_expired(cfqd, cfqq, 0); 1750 cfq_schedule_dispatch(cfqd); 1751 } 1752 1753 kmem_cache_free(cfq_pool, cfqq); 1754} 1755 1756/* 1757 * Must always be called with the rcu_read_lock() held 1758 */ 1759static void 1760__call_for_each_cic(struct io_context *ioc, 1761 void (*func)(struct io_context *, struct cfq_io_context *)) 1762{ 1763 struct cfq_io_context *cic; 1764 struct hlist_node *n; 1765 1766 hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) 1767 func(ioc, cic); 1768} 1769 1770/* 1771 * Call func for each cic attached to this ioc. 1772 */ 1773static void 1774call_for_each_cic(struct io_context *ioc, 1775 void (*func)(struct io_context *, struct cfq_io_context *)) 1776{ 1777 rcu_read_lock(); 1778 __call_for_each_cic(ioc, func); 1779 rcu_read_unlock(); 1780} 1781 1782static void cfq_cic_free_rcu(struct rcu_head *head) 1783{ 1784 struct cfq_io_context *cic; 1785 1786 cic = container_of(head, struct cfq_io_context, rcu_head); 1787 1788 kmem_cache_free(cfq_ioc_pool, cic); 1789 elv_ioc_count_dec(cfq_ioc_count); 1790 1791 if (ioc_gone) { 1792 /* 1793 * CFQ scheduler is exiting, grab exit lock and check 1794 * the pending io context count. If it hits zero, 1795 * complete ioc_gone and set it back to NULL 1796 */ 1797 spin_lock(&ioc_gone_lock); 1798 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) { 1799 complete(ioc_gone); 1800 ioc_gone = NULL; 1801 } 1802 spin_unlock(&ioc_gone_lock); 1803 } 1804} 1805 1806static void cfq_cic_free(struct cfq_io_context *cic) 1807{ 1808 call_rcu(&cic->rcu_head, cfq_cic_free_rcu); 1809} 1810 1811static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic) 1812{ 1813 unsigned long flags; 1814 1815 BUG_ON(!cic->dead_key); 1816 1817 spin_lock_irqsave(&ioc->lock, flags); 1818 radix_tree_delete(&ioc->radix_root, cic->dead_key); 1819 hlist_del_rcu(&cic->cic_list); 1820 spin_unlock_irqrestore(&ioc->lock, flags); 1821 1822 cfq_cic_free(cic); 1823} 1824 1825/* 1826 * Must be called with rcu_read_lock() held or preemption otherwise disabled. 1827 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(), 1828 * and ->trim() which is called with the task lock held 1829 */ 1830static void cfq_free_io_context(struct io_context *ioc) 1831{ 1832 /* 1833 * ioc->refcount is zero here, or we are called from elv_unregister(), 1834 * so no more cic's are allowed to be linked into this ioc. So it 1835 * should be ok to iterate over the known list, we will see all cic's 1836 * since no new ones are added. 1837 */ 1838 __call_for_each_cic(ioc, cic_free_func); 1839} 1840 1841static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1842{ 1843 struct cfq_queue *__cfqq, *next; 1844 1845 if (unlikely(cfqq == cfqd->active_queue)) { 1846 __cfq_slice_expired(cfqd, cfqq, 0); 1847 cfq_schedule_dispatch(cfqd); 1848 } 1849 1850 /* 1851 * If this queue was scheduled to merge with another queue, be 1852 * sure to drop the reference taken on that queue (and others in 1853 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs. 
1854 */ 1855 __cfqq = cfqq->new_cfqq; 1856 while (__cfqq) { 1857 if (__cfqq == cfqq) { 1858 WARN(1, "cfqq->new_cfqq loop detected\n"); 1859 break; 1860 } 1861 next = __cfqq->new_cfqq; 1862 cfq_put_queue(__cfqq); 1863 __cfqq = next; 1864 } 1865 1866 cfq_put_queue(cfqq); 1867} 1868 1869static void __cfq_exit_single_io_context(struct cfq_data *cfqd, 1870 struct cfq_io_context *cic) 1871{ 1872 struct io_context *ioc = cic->ioc; 1873 1874 list_del_init(&cic->queue_list); 1875 1876 /* 1877 * Make sure key == NULL is seen for dead queues 1878 */ 1879 smp_wmb(); 1880 cic->dead_key = (unsigned long) cic->key; 1881 cic->key = NULL; 1882 1883 if (ioc->ioc_data == cic) 1884 rcu_assign_pointer(ioc->ioc_data, NULL); 1885 1886 if (cic->cfqq[BLK_RW_ASYNC]) { 1887 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); 1888 cic->cfqq[BLK_RW_ASYNC] = NULL; 1889 } 1890 1891 if (cic->cfqq[BLK_RW_SYNC]) { 1892 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); 1893 cic->cfqq[BLK_RW_SYNC] = NULL; 1894 } 1895} 1896 1897static void cfq_exit_single_io_context(struct io_context *ioc, 1898 struct cfq_io_context *cic) 1899{ 1900 struct cfq_data *cfqd = cic->key; 1901 1902 if (cfqd) { 1903 struct request_queue *q = cfqd->queue; 1904 unsigned long flags; 1905 1906 spin_lock_irqsave(q->queue_lock, flags); 1907 1908 /* 1909 * Ensure we get a fresh copy of the ->key to prevent 1910 * race between exiting task and queue 1911 */ 1912 smp_read_barrier_depends(); 1913 if (cic->key) 1914 __cfq_exit_single_io_context(cfqd, cic); 1915 1916 spin_unlock_irqrestore(q->queue_lock, flags); 1917 } 1918} 1919 1920/* 1921 * The process that ioc belongs to has exited, we need to clean up 1922 * and put the internal structures we have that belongs to that process. 1923 */ 1924static void cfq_exit_io_context(struct io_context *ioc) 1925{ 1926 call_for_each_cic(ioc, cfq_exit_single_io_context); 1927} 1928 1929static struct cfq_io_context * 1930cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) 1931{ 1932 struct cfq_io_context *cic; 1933 1934 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO, 1935 cfqd->queue->node); 1936 if (cic) { 1937 cic->last_end_request = jiffies; 1938 INIT_LIST_HEAD(&cic->queue_list); 1939 INIT_HLIST_NODE(&cic->cic_list); 1940 cic->dtor = cfq_free_io_context; 1941 cic->exit = cfq_exit_io_context; 1942 elv_ioc_count_inc(cfq_ioc_count); 1943 } 1944 1945 return cic; 1946} 1947 1948static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) 1949{ 1950 struct task_struct *tsk = current; 1951 int ioprio_class; 1952 1953 if (!cfq_cfqq_prio_changed(cfqq)) 1954 return; 1955 1956 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); 1957 switch (ioprio_class) { 1958 default: 1959 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 1960 case IOPRIO_CLASS_NONE: 1961 /* 1962 * no prio set, inherit CPU scheduling settings 1963 */ 1964 cfqq->ioprio = task_nice_ioprio(tsk); 1965 cfqq->ioprio_class = task_nice_ioclass(tsk); 1966 break; 1967 case IOPRIO_CLASS_RT: 1968 cfqq->ioprio = task_ioprio(ioc); 1969 cfqq->ioprio_class = IOPRIO_CLASS_RT; 1970 break; 1971 case IOPRIO_CLASS_BE: 1972 cfqq->ioprio = task_ioprio(ioc); 1973 cfqq->ioprio_class = IOPRIO_CLASS_BE; 1974 break; 1975 case IOPRIO_CLASS_IDLE: 1976 cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 1977 cfqq->ioprio = 7; 1978 cfq_clear_cfqq_idle_window(cfqq); 1979 break; 1980 } 1981 1982 /* 1983 * keep track of original prio settings in case we have to temporarily 1984 * elevate the priority of this queue 1985 */ 1986 cfqq->org_ioprio = cfqq->ioprio; 1987 
cfqq->org_ioprio_class = cfqq->ioprio_class; 1988 cfq_clear_cfqq_prio_changed(cfqq); 1989} 1990 1991static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) 1992{ 1993 struct cfq_data *cfqd = cic->key; 1994 struct cfq_queue *cfqq; 1995 unsigned long flags; 1996 1997 if (unlikely(!cfqd)) 1998 return; 1999 2000 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2001 2002 cfqq = cic->cfqq[BLK_RW_ASYNC]; 2003 if (cfqq) { 2004 struct cfq_queue *new_cfqq; 2005 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc, 2006 GFP_ATOMIC); 2007 if (new_cfqq) { 2008 cic->cfqq[BLK_RW_ASYNC] = new_cfqq; 2009 cfq_put_queue(cfqq); 2010 } 2011 } 2012 2013 cfqq = cic->cfqq[BLK_RW_SYNC]; 2014 if (cfqq) 2015 cfq_mark_cfqq_prio_changed(cfqq); 2016 2017 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2018} 2019 2020static void cfq_ioc_set_ioprio(struct io_context *ioc) 2021{ 2022 call_for_each_cic(ioc, changed_ioprio); 2023 ioc->ioprio_changed = 0; 2024} 2025 2026static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2027 pid_t pid, bool is_sync) 2028{ 2029 RB_CLEAR_NODE(&cfqq->rb_node); 2030 RB_CLEAR_NODE(&cfqq->p_node); 2031 INIT_LIST_HEAD(&cfqq->fifo); 2032 2033 atomic_set(&cfqq->ref, 0); 2034 cfqq->cfqd = cfqd; 2035 2036 cfq_mark_cfqq_prio_changed(cfqq); 2037 2038 if (is_sync) { 2039 if (!cfq_class_idle(cfqq)) 2040 cfq_mark_cfqq_idle_window(cfqq); 2041 cfq_mark_cfqq_sync(cfqq); 2042 } 2043 cfqq->pid = pid; 2044} 2045 2046static struct cfq_queue * 2047cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, 2048 struct io_context *ioc, gfp_t gfp_mask) 2049{ 2050 struct cfq_queue *cfqq, *new_cfqq = NULL; 2051 struct cfq_io_context *cic; 2052 2053retry: 2054 cic = cfq_cic_lookup(cfqd, ioc); 2055 /* cic always exists here */ 2056 cfqq = cic_to_cfqq(cic, is_sync); 2057 2058 /* 2059 * Always try a new alloc if we fell back to the OOM cfqq 2060 * originally, since it should just be a temporary situation. 
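 *
 * Note: oom_cfqq is embedded in struct cfq_data rather than allocated, so
 * it can always be handed out even when kmem_cache_alloc_node() fails; if
 * __GFP_WAIT is set, the queue lock is dropped around the allocation and
 * the lookup is retried afterwards.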
2061 */ 2062 if (!cfqq || cfqq == &cfqd->oom_cfqq) { 2063 cfqq = NULL; 2064 if (new_cfqq) { 2065 cfqq = new_cfqq; 2066 new_cfqq = NULL; 2067 } else if (gfp_mask & __GFP_WAIT) { 2068 spin_unlock_irq(cfqd->queue->queue_lock); 2069 new_cfqq = kmem_cache_alloc_node(cfq_pool, 2070 gfp_mask | __GFP_ZERO, 2071 cfqd->queue->node); 2072 spin_lock_irq(cfqd->queue->queue_lock); 2073 if (new_cfqq) 2074 goto retry; 2075 } else { 2076 cfqq = kmem_cache_alloc_node(cfq_pool, 2077 gfp_mask | __GFP_ZERO, 2078 cfqd->queue->node); 2079 } 2080 2081 if (cfqq) { 2082 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); 2083 cfq_init_prio_data(cfqq, ioc); 2084 cfq_log_cfqq(cfqd, cfqq, "alloced"); 2085 } else 2086 cfqq = &cfqd->oom_cfqq; 2087 } 2088 2089 if (new_cfqq) 2090 kmem_cache_free(cfq_pool, new_cfqq); 2091 2092 return cfqq; 2093} 2094 2095static struct cfq_queue ** 2096cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) 2097{ 2098 switch (ioprio_class) { 2099 case IOPRIO_CLASS_RT: 2100 return &cfqd->async_cfqq[0][ioprio]; 2101 case IOPRIO_CLASS_BE: 2102 return &cfqd->async_cfqq[1][ioprio]; 2103 case IOPRIO_CLASS_IDLE: 2104 return &cfqd->async_idle_cfqq; 2105 default: 2106 BUG(); 2107 } 2108} 2109 2110static struct cfq_queue * 2111cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, 2112 gfp_t gfp_mask) 2113{ 2114 const int ioprio = task_ioprio(ioc); 2115 const int ioprio_class = task_ioprio_class(ioc); 2116 struct cfq_queue **async_cfqq = NULL; 2117 struct cfq_queue *cfqq = NULL; 2118 2119 if (!is_sync) { 2120 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); 2121 cfqq = *async_cfqq; 2122 } 2123 2124 if (!cfqq) 2125 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask); 2126 2127 /* 2128 * pin the queue now that it's allocated, scheduler exit will prune it 2129 */ 2130 if (!is_sync && !(*async_cfqq)) { 2131 atomic_inc(&cfqq->ref); 2132 *async_cfqq = cfqq; 2133 } 2134 2135 atomic_inc(&cfqq->ref); 2136 return cfqq; 2137} 2138 2139/* 2140 * We drop cfq io contexts lazily, so we may find a dead one. 
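 * A cic is dead once __cfq_exit_single_io_context() has cleared its ->key; cfq_cic_lookup() below notices the NULL key and calls cfq_drop_dead_cic() to unlink the stale entry from the radix tree.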
2141 */ 2142static void 2143cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc, 2144 struct cfq_io_context *cic) 2145{ 2146 unsigned long flags; 2147 2148 WARN_ON(!list_empty(&cic->queue_list)); 2149 2150 spin_lock_irqsave(&ioc->lock, flags); 2151 2152 BUG_ON(ioc->ioc_data == cic); 2153 2154 radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd); 2155 hlist_del_rcu(&cic->cic_list); 2156 spin_unlock_irqrestore(&ioc->lock, flags); 2157 2158 cfq_cic_free(cic); 2159} 2160 2161static struct cfq_io_context * 2162cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) 2163{ 2164 struct cfq_io_context *cic; 2165 unsigned long flags; 2166 void *k; 2167 2168 if (unlikely(!ioc)) 2169 return NULL; 2170 2171 rcu_read_lock(); 2172 2173 /* 2174 * we maintain a last-hit cache, to avoid browsing over the tree 2175 */ 2176 cic = rcu_dereference(ioc->ioc_data); 2177 if (cic && cic->key == cfqd) { 2178 rcu_read_unlock(); 2179 return cic; 2180 } 2181 2182 do { 2183 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd); 2184 rcu_read_unlock(); 2185 if (!cic) 2186 break; 2187 /* ->key must be copied to avoid race with cfq_exit_queue() */ 2188 k = cic->key; 2189 if (unlikely(!k)) { 2190 cfq_drop_dead_cic(cfqd, ioc, cic); 2191 rcu_read_lock(); 2192 continue; 2193 } 2194 2195 spin_lock_irqsave(&ioc->lock, flags); 2196 rcu_assign_pointer(ioc->ioc_data, cic); 2197 spin_unlock_irqrestore(&ioc->lock, flags); 2198 break; 2199 } while (1); 2200 2201 return cic; 2202} 2203 2204/* 2205 * Add cic into ioc, using cfqd as the search key. This enables us to lookup 2206 * the process specific cfq io context when entered from the block layer. 2207 * Also adds the cic to a per-cfqd list, used when this queue is removed. 2208 */ 2209static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, 2210 struct cfq_io_context *cic, gfp_t gfp_mask) 2211{ 2212 unsigned long flags; 2213 int ret; 2214 2215 ret = radix_tree_preload(gfp_mask); 2216 if (!ret) { 2217 cic->ioc = ioc; 2218 cic->key = cfqd; 2219 2220 spin_lock_irqsave(&ioc->lock, flags); 2221 ret = radix_tree_insert(&ioc->radix_root, 2222 (unsigned long) cfqd, cic); 2223 if (!ret) 2224 hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list); 2225 spin_unlock_irqrestore(&ioc->lock, flags); 2226 2227 radix_tree_preload_end(); 2228 2229 if (!ret) { 2230 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2231 list_add(&cic->queue_list, &cfqd->cic_list); 2232 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2233 } 2234 } 2235 2236 if (ret) 2237 printk(KERN_ERR "cfq: cic link failed!\n"); 2238 2239 return ret; 2240} 2241 2242/* 2243 * Setup general io context and cfq io context. There can be several cfq 2244 * io contexts per general io context, if this process is doing io to more 2245 * than one device managed by cfq. 
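 * The per-device cics live in the io_context's radix tree, keyed by the cfq_data pointer (see cfq_cic_link() above), so one process can have an independent cic, and hence cfqqs, per cfq-managed device.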
2246 */ 2247static struct cfq_io_context * 2248cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) 2249{ 2250 struct io_context *ioc = NULL; 2251 struct cfq_io_context *cic; 2252 2253 might_sleep_if(gfp_mask & __GFP_WAIT); 2254 2255 ioc = get_io_context(gfp_mask, cfqd->queue->node); 2256 if (!ioc) 2257 return NULL; 2258 2259 cic = cfq_cic_lookup(cfqd, ioc); 2260 if (cic) 2261 goto out; 2262 2263 cic = cfq_alloc_io_context(cfqd, gfp_mask); 2264 if (cic == NULL) 2265 goto err; 2266 2267 if (cfq_cic_link(cfqd, ioc, cic, gfp_mask)) 2268 goto err_free; 2269 2270out: 2271 smp_read_barrier_depends(); 2272 if (unlikely(ioc->ioprio_changed)) 2273 cfq_ioc_set_ioprio(ioc); 2274 2275 return cic; 2276err_free: 2277 cfq_cic_free(cic); 2278err: 2279 put_io_context(ioc); 2280 return NULL; 2281} 2282 2283static void 2284cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) 2285{ 2286 unsigned long elapsed = jiffies - cic->last_end_request; 2287 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); 2288 2289 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; 2290 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; 2291 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; 2292} 2293 2294static void 2295cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2296 struct request *rq) 2297{ 2298 sector_t sdist; 2299 u64 total; 2300 2301 if (!cfqq->last_request_pos) 2302 sdist = 0; 2303 else if (cfqq->last_request_pos < blk_rq_pos(rq)) 2304 sdist = blk_rq_pos(rq) - cfqq->last_request_pos; 2305 else 2306 sdist = cfqq->last_request_pos - blk_rq_pos(rq); 2307 2308 /* 2309 * Don't allow the seek distance to get too large from the 2310 * odd fragment, pagein, etc 2311 */ 2312 if (cfqq->seek_samples <= 60) /* second&third seek */ 2313 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024); 2314 else 2315 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64); 2316 2317 cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8; 2318 cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8; 2319 total = cfqq->seek_total + (cfqq->seek_samples/2); 2320 do_div(total, cfqq->seek_samples); 2321 cfqq->seek_mean = (sector_t)total; 2322 2323 /* 2324 * If this cfqq is shared between multiple processes, check to 2325 * make sure that those processes are still issuing I/Os within 2326 * the mean seek distance. If not, it may be time to break the 2327 * queues apart again. 
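 * cfqq->seeky_start, updated below, records when the shared queue first turned seeky; should_split_cfqq() breaks the queue up again once that state has persisted for CFQQ_COOP_TOUT jiffies.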
2328 */ 2329 if (cfq_cfqq_coop(cfqq)) { 2330 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start) 2331 cfqq->seeky_start = jiffies; 2332 else if (!CFQQ_SEEKY(cfqq)) 2333 cfqq->seeky_start = 0; 2334 } 2335} 2336 2337/* 2338 * Disable idle window if the process thinks too long or seeks so much that 2339 * it doesn't matter 2340 */ 2341static void 2342cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2343 struct cfq_io_context *cic) 2344{ 2345 int old_idle, enable_idle; 2346 2347 /* 2348 * Don't idle for async or idle io prio class 2349 */ 2350 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq)) 2351 return; 2352 2353 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); 2354 2355 if (cfqq->queued[0] + cfqq->queued[1] >= 4) 2356 cfq_mark_cfqq_deep(cfqq); 2357 2358 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || 2359 (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples) 2360 && CFQQ_SEEKY(cfqq))) 2361 enable_idle = 0; 2362 else if (sample_valid(cic->ttime_samples)) { 2363 if (cic->ttime_mean > cfqd->cfq_slice_idle) 2364 enable_idle = 0; 2365 else 2366 enable_idle = 1; 2367 } 2368 2369 if (old_idle != enable_idle) { 2370 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); 2371 if (enable_idle) 2372 cfq_mark_cfqq_idle_window(cfqq); 2373 else 2374 cfq_clear_cfqq_idle_window(cfqq); 2375 } 2376} 2377 2378/* 2379 * Check if new_cfqq should preempt the currently active queue. Return false 2380 * if it shouldn't or if we aren't sure; returning true will cause a preempt. 2381 */ 2382static bool 2383cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, 2384 struct request *rq) 2385{ 2386 struct cfq_queue *cfqq; 2387 2388 cfqq = cfqd->active_queue; 2389 if (!cfqq) 2390 return false; 2391 2392 if (cfq_slice_used(cfqq)) 2393 return true; 2394 2395 if (cfq_class_idle(new_cfqq)) 2396 return false; 2397 2398 if (cfq_class_idle(cfqq)) 2399 return true; 2400 2401 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD && 2402 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && 2403 new_cfqq->service_tree->count == 1) 2404 return true; 2405 2406 /* 2407 * if the new request is sync, but the currently running queue is 2408 * not, let the sync request have priority. 2409 */ 2410 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) 2411 return true; 2412 2413 /* 2414 * So both queues are sync. Let the new request get disk time if 2415 * it's a metadata request and the current queue is doing regular IO. 2416 */ 2417 if (rq_is_meta(rq) && !cfqq->meta_pending) 2418 return true; 2419 2420 /* 2421 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. 2422 */ 2423 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) 2424 return true; 2425 2426 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) 2427 return false; 2428 2429 /* 2430 * if this request is as-good as one we would expect from the 2431 * current cfqq, let it preempt 2432 */ 2433 if (cfq_rq_close(cfqd, cfqq, rq)) 2434 return true; 2435 2436 return false; 2437} 2438 2439/* 2440 * cfqq preempts the active queue. if we allowed preempt with no slice left, 2441 * let it have half of its nominal slice. 2442 */ 2443static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2444{ 2445 cfq_log_cfqq(cfqd, cfqq, "preempt"); 2446 cfq_slice_expired(cfqd, 1); 2447 2448 /* 2449 * Put the new queue at the front of the current list, 2450 * so we know that it will be selected next.
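 * (The final argument to cfq_service_tree_add() presumably requests front insertion; clearing slice_end and marking the queue slice_new below lets it start a fresh slice.)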
2451 */ 2452 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 2453 2454 cfq_service_tree_add(cfqd, cfqq, 1); 2455 2456 cfqq->slice_end = 0; 2457 cfq_mark_cfqq_slice_new(cfqq); 2458} 2459 2460/* 2461 * Called when a new fs request (rq) is added (to cfqq). Check if there's 2462 * something we should do about it. 2463 */ 2464static void 2465cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2466 struct request *rq) 2467{ 2468 struct cfq_io_context *cic = RQ_CIC(rq); 2469 2470 cfqd->rq_queued++; 2471 if (rq_is_meta(rq)) 2472 cfqq->meta_pending++; 2473 2474 cfq_update_io_thinktime(cfqd, cic); 2475 cfq_update_io_seektime(cfqd, cfqq, rq); 2476 cfq_update_idle_window(cfqd, cfqq, cic); 2477 2478 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 2479 2480 if (cfqq == cfqd->active_queue) { 2481 /* 2482 * Remember that we saw a request from this process, but 2483 * don't start queuing just yet. Otherwise we risk seeing lots 2484 * of tiny requests, because we disrupt the normal plugging 2485 * and merging. If the request is already larger than a single 2486 * page, let it rip immediately. For that case we assume that 2487 * merging is already done. Ditto for a busy system that 2488 * has other work pending; don't risk waiting for the 2489 * idle timer unplug to continue working. 2490 */ 2491 if (cfq_cfqq_wait_request(cfqq)) { 2492 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 2493 cfqd->busy_queues > 1) { 2494 del_timer(&cfqd->idle_slice_timer); 2495 __blk_run_queue(cfqd->queue); 2496 } 2497 cfq_mark_cfqq_must_dispatch(cfqq); 2498 } 2499 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { 2500 /* 2501 * not the active queue - expire the current slice if it is 2502 * idle and has exceeded its mean thinktime, or if this new queue 2503 * has some old slice time left and is of higher priority, or if 2504 * this new queue is RT and the current one is BE 2505 */ 2506 cfq_preempt_queue(cfqd, cfqq); 2507 __blk_run_queue(cfqd->queue); 2508 } 2509} 2510 2511static void cfq_insert_request(struct request_queue *q, struct request *rq) 2512{ 2513 struct cfq_data *cfqd = q->elevator->elevator_data; 2514 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2515 2516 cfq_log_cfqq(cfqd, cfqq, "insert_request"); 2517 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc); 2518 2519 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); 2520 list_add_tail(&rq->queuelist, &cfqq->fifo); 2521 cfq_add_rq_rb(rq); 2522 2523 cfq_rq_enqueued(cfqd, cfqq, rq); 2524} 2525 2526/* 2527 * Update hw_tag based on peak queue depth over 50 samples under 2528 * sufficient load. 2529 */ 2530static void cfq_update_hw_tag(struct cfq_data *cfqd) 2531{ 2532 struct cfq_queue *cfqq = cfqd->active_queue; 2533 2534 if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth) 2535 cfqd->hw_tag_est_depth = rq_in_driver(cfqd); 2536 2537 if (cfqd->hw_tag == 1) 2538 return; 2539 2540 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && 2541 rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN) 2542 return; 2543 2544 /* 2545 * If the active queue doesn't have enough requests and can idle, cfq might not 2546 * dispatch sufficient requests to hardware.
Don't zero hw_tag in this 2547 * case 2548 */ 2549 if (cfqq && cfq_cfqq_idle_window(cfqq) && 2550 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] < 2551 CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN) 2552 return; 2553 2554 if (cfqd->hw_tag_samples++ < 50) 2555 return; 2556 2557 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) 2558 cfqd->hw_tag = 1; 2559 else 2560 cfqd->hw_tag = 0; 2561} 2562 2563static void cfq_completed_request(struct request_queue *q, struct request *rq) 2564{ 2565 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2566 struct cfq_data *cfqd = cfqq->cfqd; 2567 const int sync = rq_is_sync(rq); 2568 unsigned long now; 2569 2570 now = jiffies; 2571 cfq_log_cfqq(cfqd, cfqq, "complete"); 2572 2573 cfq_update_hw_tag(cfqd); 2574 2575 WARN_ON(!cfqd->rq_in_driver[sync]); 2576 WARN_ON(!cfqq->dispatched); 2577 cfqd->rq_in_driver[sync]--; 2578 cfqq->dispatched--; 2579 2580 if (cfq_cfqq_sync(cfqq)) 2581 cfqd->sync_flight--; 2582 2583 if (sync) { 2584 RQ_CIC(rq)->last_end_request = now; 2585 cfqd->last_end_sync_rq = now; 2586 } 2587 2588 /* 2589 * If this is the active queue, check if it needs to be expired, 2590 * or if we want to idle in case it has no pending requests. 2591 */ 2592 if (cfqd->active_queue == cfqq) { 2593 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list); 2594 2595 if (cfq_cfqq_slice_new(cfqq)) { 2596 cfq_set_prio_slice(cfqd, cfqq); 2597 cfq_clear_cfqq_slice_new(cfqq); 2598 } 2599 /* 2600 * If there are no requests waiting in this queue, and 2601 * there are other queues ready to issue requests, AND 2602 * those other queues are issuing requests within our 2603 * mean seek distance, give them a chance to run instead 2604 * of idling. 2605 */ 2606 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) 2607 cfq_slice_expired(cfqd, 1); 2608 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq) && 2609 sync && !rq_noidle(rq)) 2610 cfq_arm_slice_timer(cfqd); 2611 } 2612 2613 if (!rq_in_driver(cfqd)) 2614 cfq_schedule_dispatch(cfqd); 2615} 2616 2617/* 2618 * we temporarily boost lower priority queues if they are holding fs exclusive 2619 * resources. they are boosted to normal prio (CLASS_BE/4) 2620 */ 2621static void cfq_prio_boost(struct cfq_queue *cfqq) 2622{ 2623 if (has_fs_excl()) { 2624 /* 2625 * boost idle prio on transactions that would lock out other 2626 * users of the filesystem 2627 */ 2628 if (cfq_class_idle(cfqq)) 2629 cfqq->ioprio_class = IOPRIO_CLASS_BE; 2630 if (cfqq->ioprio > IOPRIO_NORM) 2631 cfqq->ioprio = IOPRIO_NORM; 2632 } else { 2633 /* 2634 * unboost the queue (if needed) 2635 */ 2636 cfqq->ioprio_class = cfqq->org_ioprio_class; 2637 cfqq->ioprio = cfqq->org_ioprio; 2638 } 2639} 2640 2641static inline int __cfq_may_queue(struct cfq_queue *cfqq) 2642{ 2643 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) { 2644 cfq_mark_cfqq_must_alloc_slice(cfqq); 2645 return ELV_MQUEUE_MUST; 2646 } 2647 2648 return ELV_MQUEUE_MAY; 2649} 2650 2651static int cfq_may_queue(struct request_queue *q, int rw) 2652{ 2653 struct cfq_data *cfqd = q->elevator->elevator_data; 2654 struct task_struct *tsk = current; 2655 struct cfq_io_context *cic; 2656 struct cfq_queue *cfqq; 2657 2658 /* 2659 * don't force setup of a queue from here, as a call to may_queue 2660 * does not necessarily imply that a request actually will be queued. 
2661 * so just lookup a possibly existing queue, or return 'may queue' 2662 * if that fails 2663 */ 2664 cic = cfq_cic_lookup(cfqd, tsk->io_context); 2665 if (!cic) 2666 return ELV_MQUEUE_MAY; 2667 2668 cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); 2669 if (cfqq) { 2670 cfq_init_prio_data(cfqq, cic->ioc); 2671 cfq_prio_boost(cfqq); 2672 2673 return __cfq_may_queue(cfqq); 2674 } 2675 2676 return ELV_MQUEUE_MAY; 2677} 2678 2679/* 2680 * queue lock held here 2681 */ 2682static void cfq_put_request(struct request *rq) 2683{ 2684 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2685 2686 if (cfqq) { 2687 const int rw = rq_data_dir(rq); 2688 2689 BUG_ON(!cfqq->allocated[rw]); 2690 cfqq->allocated[rw]--; 2691 2692 put_io_context(RQ_CIC(rq)->ioc); 2693 2694 rq->elevator_private = NULL; 2695 rq->elevator_private2 = NULL; 2696 2697 cfq_put_queue(cfqq); 2698 } 2699} 2700 2701static struct cfq_queue * 2702cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic, 2703 struct cfq_queue *cfqq) 2704{ 2705 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); 2706 cic_set_cfqq(cic, cfqq->new_cfqq, 1); 2707 cfq_mark_cfqq_coop(cfqq->new_cfqq); 2708 cfq_put_queue(cfqq); 2709 return cic_to_cfqq(cic, 1); 2710} 2711 2712static int should_split_cfqq(struct cfq_queue *cfqq) 2713{ 2714 if (cfqq->seeky_start && 2715 time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT)) 2716 return 1; 2717 return 0; 2718} 2719 2720/* 2721 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this 2722 * was the last process referring to said cfqq. 2723 */ 2724static struct cfq_queue * 2725split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) 2726{ 2727 if (cfqq_process_refs(cfqq) == 1) { 2728 cfqq->seeky_start = 0; 2729 cfqq->pid = current->pid; 2730 cfq_clear_cfqq_coop(cfqq); 2731 return cfqq; 2732 } 2733 2734 cic_set_cfqq(cic, NULL, 1); 2735 cfq_put_queue(cfqq); 2736 return NULL; 2737} 2738/* 2739 * Allocate cfq data structures associated with this request. 2740 */ 2741static int 2742cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) 2743{ 2744 struct cfq_data *cfqd = q->elevator->elevator_data; 2745 struct cfq_io_context *cic; 2746 const int rw = rq_data_dir(rq); 2747 const bool is_sync = rq_is_sync(rq); 2748 struct cfq_queue *cfqq; 2749 unsigned long flags; 2750 2751 might_sleep_if(gfp_mask & __GFP_WAIT); 2752 2753 cic = cfq_get_io_context(cfqd, gfp_mask); 2754 2755 spin_lock_irqsave(q->queue_lock, flags); 2756 2757 if (!cic) 2758 goto queue_fail; 2759 2760new_queue: 2761 cfqq = cic_to_cfqq(cic, is_sync); 2762 if (!cfqq || cfqq == &cfqd->oom_cfqq) { 2763 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); 2764 cic_set_cfqq(cic, cfqq, is_sync); 2765 } else { 2766 /* 2767 * If the queue was seeky for too long, break it apart. 2768 */ 2769 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) { 2770 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); 2771 cfqq = split_cfqq(cic, cfqq); 2772 if (!cfqq) 2773 goto new_queue; 2774 } 2775 2776 /* 2777 * Check to see if this queue is scheduled to merge with 2778 * another, closely cooperating queue. The merging of 2779 * queues happens here as it must be done in process context. 2780 * The reference on new_cfqq was taken in merge_cfqqs. 
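 * cfq_merge_cfqqs() re-points the cic at cfqq->new_cfqq, drops our reference to the old queue and returns the surviving queue, which this request is then charged to.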
2781 */ 2782 if (cfqq->new_cfqq) 2783 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); 2784 } 2785 2786 cfqq->allocated[rw]++; 2787 atomic_inc(&cfqq->ref); 2788 2789 spin_unlock_irqrestore(q->queue_lock, flags); 2790 2791 rq->elevator_private = cic; 2792 rq->elevator_private2 = cfqq; 2793 return 0; 2794 2795queue_fail: 2796 if (cic) 2797 put_io_context(cic->ioc); 2798 2799 cfq_schedule_dispatch(cfqd); 2800 spin_unlock_irqrestore(q->queue_lock, flags); 2801 cfq_log(cfqd, "set_request fail"); 2802 return 1; 2803} 2804 2805static void cfq_kick_queue(struct work_struct *work) 2806{ 2807 struct cfq_data *cfqd = 2808 container_of(work, struct cfq_data, unplug_work); 2809 struct request_queue *q = cfqd->queue; 2810 2811 spin_lock_irq(q->queue_lock); 2812 __blk_run_queue(cfqd->queue); 2813 spin_unlock_irq(q->queue_lock); 2814} 2815 2816/* 2817 * Timer running if the active_queue is currently idling inside its time slice 2818 */ 2819static void cfq_idle_slice_timer(unsigned long data) 2820{ 2821 struct cfq_data *cfqd = (struct cfq_data *) data; 2822 struct cfq_queue *cfqq; 2823 unsigned long flags; 2824 int timed_out = 1; 2825 2826 cfq_log(cfqd, "idle timer fired"); 2827 2828 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2829 2830 cfqq = cfqd->active_queue; 2831 if (cfqq) { 2832 timed_out = 0; 2833 2834 /* 2835 * We saw a request before the queue expired, let it through 2836 */ 2837 if (cfq_cfqq_must_dispatch(cfqq)) 2838 goto out_kick; 2839 2840 /* 2841 * expired 2842 */ 2843 if (cfq_slice_used(cfqq)) 2844 goto expire; 2845 2846 /* 2847 * only expire and reinvoke request handler, if there are 2848 * other queues with pending requests 2849 */ 2850 if (!cfqd->busy_queues) 2851 goto out_cont; 2852 2853 /* 2854 * not expired and it has a request pending, let it dispatch 2855 */ 2856 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 2857 goto out_kick; 2858 2859 /* 2860 * Queue depth flag is reset only when the idle didn't succeed 2861 */ 2862 cfq_clear_cfqq_deep(cfqq); 2863 } 2864expire: 2865 cfq_slice_expired(cfqd, timed_out); 2866out_kick: 2867 cfq_schedule_dispatch(cfqd); 2868out_cont: 2869 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2870} 2871 2872static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 2873{ 2874 del_timer_sync(&cfqd->idle_slice_timer); 2875 cancel_work_sync(&cfqd->unplug_work); 2876} 2877 2878static void cfq_put_async_queues(struct cfq_data *cfqd) 2879{ 2880 int i; 2881 2882 for (i = 0; i < IOPRIO_BE_NR; i++) { 2883 if (cfqd->async_cfqq[0][i]) 2884 cfq_put_queue(cfqd->async_cfqq[0][i]); 2885 if (cfqd->async_cfqq[1][i]) 2886 cfq_put_queue(cfqd->async_cfqq[1][i]); 2887 } 2888 2889 if (cfqd->async_idle_cfqq) 2890 cfq_put_queue(cfqd->async_idle_cfqq); 2891} 2892 2893static void cfq_exit_queue(struct elevator_queue *e) 2894{ 2895 struct cfq_data *cfqd = e->elevator_data; 2896 struct request_queue *q = cfqd->queue; 2897 2898 cfq_shutdown_timer_wq(cfqd); 2899 2900 spin_lock_irq(q->queue_lock); 2901 2902 if (cfqd->active_queue) 2903 __cfq_slice_expired(cfqd, cfqd->active_queue, 0); 2904 2905 while (!list_empty(&cfqd->cic_list)) { 2906 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, 2907 struct cfq_io_context, 2908 queue_list); 2909 2910 __cfq_exit_single_io_context(cfqd, cic); 2911 } 2912 2913 cfq_put_async_queues(cfqd); 2914 2915 spin_unlock_irq(q->queue_lock); 2916 2917 cfq_shutdown_timer_wq(cfqd); 2918 2919 kfree(cfqd); 2920} 2921 2922static void *cfq_init_queue(struct request_queue *q) 2923{ 2924 struct cfq_data *cfqd; 2925 int i, j; 2926 2927 cfqd = 
kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 2928 if (!cfqd) 2929 return NULL; 2930 2931 for (i = 0; i < 2; ++i) 2932 for (j = 0; j < 3; ++j) 2933 cfqd->service_trees[i][j] = CFQ_RB_ROOT; 2934 cfqd->service_tree_idle = CFQ_RB_ROOT; 2935 2936 /* 2937 * Not strictly needed (since RB_ROOT just clears the node and we 2938 * zeroed cfqd on alloc), but better be safe in case someone decides 2939 * to add magic to the rb code 2940 */ 2941 for (i = 0; i < CFQ_PRIO_LISTS; i++) 2942 cfqd->prio_trees[i] = RB_ROOT; 2943 2944 /* 2945 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues. 2946 * Grab a permanent reference to it, so that the normal code flow 2947 * will not attempt to free it. 2948 */ 2949 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); 2950 atomic_inc(&cfqd->oom_cfqq.ref); 2951 2952 INIT_LIST_HEAD(&cfqd->cic_list); 2953 2954 cfqd->queue = q; 2955 2956 init_timer(&cfqd->idle_slice_timer); 2957 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 2958 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 2959 2960 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 2961 2962 cfqd->cfq_quantum = cfq_quantum; 2963 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; 2964 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; 2965 cfqd->cfq_back_max = cfq_back_max; 2966 cfqd->cfq_back_penalty = cfq_back_penalty; 2967 cfqd->cfq_slice[0] = cfq_slice_async; 2968 cfqd->cfq_slice[1] = cfq_slice_sync; 2969 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 2970 cfqd->cfq_slice_idle = cfq_slice_idle; 2971 cfqd->cfq_latency = 1; 2972 cfqd->hw_tag = -1; 2973 cfqd->last_end_sync_rq = jiffies; 2974 return cfqd; 2975} 2976 2977static void cfq_slab_kill(void) 2978{ 2979 /* 2980 * Caller already ensured that pending RCU callbacks are completed, 2981 * so we should have no busy allocations at this point. 
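 * (See cfq_exit() below: it waits for the ioc_gone completion while cics are still outstanding and only then calls cfq_slab_kill().)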
2982 */ 2983 if (cfq_pool) 2984 kmem_cache_destroy(cfq_pool); 2985 if (cfq_ioc_pool) 2986 kmem_cache_destroy(cfq_ioc_pool); 2987} 2988 2989static int __init cfq_slab_setup(void) 2990{ 2991 cfq_pool = KMEM_CACHE(cfq_queue, 0); 2992 if (!cfq_pool) 2993 goto fail; 2994 2995 cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0); 2996 if (!cfq_ioc_pool) 2997 goto fail; 2998 2999 return 0; 3000fail: 3001 cfq_slab_kill(); 3002 return -ENOMEM; 3003} 3004 3005/* 3006 * sysfs parts below --> 3007 */ 3008static ssize_t 3009cfq_var_show(unsigned int var, char *page) 3010{ 3011 return sprintf(page, "%d\n", var); 3012} 3013 3014static ssize_t 3015cfq_var_store(unsigned int *var, const char *page, size_t count) 3016{ 3017 char *p = (char *) page; 3018 3019 *var = simple_strtoul(p, &p, 10); 3020 return count; 3021} 3022 3023#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ 3024static ssize_t __FUNC(struct elevator_queue *e, char *page) \ 3025{ \ 3026 struct cfq_data *cfqd = e->elevator_data; \ 3027 unsigned int __data = __VAR; \ 3028 if (__CONV) \ 3029 __data = jiffies_to_msecs(__data); \ 3030 return cfq_var_show(__data, (page)); \ 3031} 3032SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); 3033SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); 3034SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); 3035SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); 3036SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); 3037SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); 3038SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 3039SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 3040SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 3041SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); 3042#undef SHOW_FUNCTION 3043 3044#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 3045static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ 3046{ \ 3047 struct cfq_data *cfqd = e->elevator_data; \ 3048 unsigned int __data; \ 3049 int ret = cfq_var_store(&__data, (page), count); \ 3050 if (__data < (MIN)) \ 3051 __data = (MIN); \ 3052 else if (__data > (MAX)) \ 3053 __data = (MAX); \ 3054 if (__CONV) \ 3055 *(__PTR) = msecs_to_jiffies(__data); \ 3056 else \ 3057 *(__PTR) = __data; \ 3058 return ret; \ 3059} 3060STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 3061STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, 3062 UINT_MAX, 1); 3063STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, 3064 UINT_MAX, 1); 3065STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 3066STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, 3067 UINT_MAX, 0); 3068STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 3069STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 3070STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 3071STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 3072 UINT_MAX, 0); 3073STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); 3074#undef STORE_FUNCTION 3075 3076#define CFQ_ATTR(name) \ 3077 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) 3078 3079static struct elv_fs_entry cfq_attrs[] = { 3080 CFQ_ATTR(quantum), 3081 CFQ_ATTR(fifo_expire_sync), 3082 CFQ_ATTR(fifo_expire_async), 3083 CFQ_ATTR(back_seek_max), 3084 CFQ_ATTR(back_seek_penalty), 3085 
CFQ_ATTR(slice_sync), 3086 CFQ_ATTR(slice_async), 3087 CFQ_ATTR(slice_async_rq), 3088 CFQ_ATTR(slice_idle), 3089 CFQ_ATTR(low_latency), 3090 __ATTR_NULL 3091}; 3092 3093static struct elevator_type iosched_cfq = { 3094 .ops = { 3095 .elevator_merge_fn = cfq_merge, 3096 .elevator_merged_fn = cfq_merged_request, 3097 .elevator_merge_req_fn = cfq_merged_requests, 3098 .elevator_allow_merge_fn = cfq_allow_merge, 3099 .elevator_dispatch_fn = cfq_dispatch_requests, 3100 .elevator_add_req_fn = cfq_insert_request, 3101 .elevator_activate_req_fn = cfq_activate_request, 3102 .elevator_deactivate_req_fn = cfq_deactivate_request, 3103 .elevator_queue_empty_fn = cfq_queue_empty, 3104 .elevator_completed_req_fn = cfq_completed_request, 3105 .elevator_former_req_fn = elv_rb_former_request, 3106 .elevator_latter_req_fn = elv_rb_latter_request, 3107 .elevator_set_req_fn = cfq_set_request, 3108 .elevator_put_req_fn = cfq_put_request, 3109 .elevator_may_queue_fn = cfq_may_queue, 3110 .elevator_init_fn = cfq_init_queue, 3111 .elevator_exit_fn = cfq_exit_queue, 3112 .trim = cfq_free_io_context, 3113 }, 3114 .elevator_attrs = cfq_attrs, 3115 .elevator_name = "cfq", 3116 .elevator_owner = THIS_MODULE, 3117}; 3118 3119static int __init cfq_init(void) 3120{ 3121 /* 3122 * could be 0 on HZ < 1000 setups 3123 */ 3124 if (!cfq_slice_async) 3125 cfq_slice_async = 1; 3126 if (!cfq_slice_idle) 3127 cfq_slice_idle = 1; 3128 3129 if (cfq_slab_setup()) 3130 return -ENOMEM; 3131 3132 elv_register(&iosched_cfq); 3133 3134 return 0; 3135} 3136 3137static void __exit cfq_exit(void) 3138{ 3139 DECLARE_COMPLETION_ONSTACK(all_gone); 3140 elv_unregister(&iosched_cfq); 3141 ioc_gone = &all_gone; 3142 /* ioc_gone's update must be visible before reading ioc_count */ 3143 smp_wmb(); 3144 3145 /* 3146 * this also protects us from entering cfq_slab_kill() with 3147 * pending RCU callbacks 3148 */ 3149 if (elv_ioc_count_read(cfq_ioc_count)) 3150 wait_for_completion(&all_gone); 3151 cfq_slab_kill(); 3152} 3153 3154module_init(cfq_init); 3155module_exit(cfq_exit); 3156 3157MODULE_AUTHOR("Jens Axboe"); 3158MODULE_LICENSE("GPL"); 3159MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler"); 3160