cfq-iosched.c revision ffc4e7595734cf768fa60cea8a4d545dfef8231a
/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;

	int rq_in_driver;
	int sync_flight;
	int hw_tag;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}
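
/*
 * Illustrative note: each CFQ_CFQQ_FNS(name) invocation below expands to
 * three helpers; e.g. CFQ_CFQQ_FNS(on_rr) yields cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * bit CFQ_CFQQ_FLAG_on_rr in cfqq->flags.
 */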
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
					     struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
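
/*
 * Worked example (illustrative): with CFQ_SLICE_SCALE == 5 and a base
 * slice of 100ms, prio 4 (the BE default) keeps the base 100ms, prio 0
 * gets 100 + 100/5 * 4 = 180ms, and prio 7 gets 100 + 100/5 * -3 = 40ms.
 * Each priority step is thus worth one fifth of the base slice.
 */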

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01	/* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02	/* request 2 wraps */
	unsigned wrap = 0;	/* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
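
/*
 * Informal note on the service tree keying below: rb_key approximates the
 * jiffies time at which a queue should next be serviced, so smaller keys
 * sort (and get picked) first. add_front passes a key of 0 to push a
 * preempting queue to the very front, and idle-class queues are keyed past
 * the current last entry (plus CFQ_IDLE_DELAY) so they always trail.
 */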

/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				 struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	int left;

	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back.
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;

	/*
	 * If the depth is larger than 1, it really could be queueing. But
	 * let's make the mark a little higher - idling could still be good
	 * for low queueing, and a low queueing number could also just
	 * indicate a SCSI mid layer like behaviour where limit+1 is often
	 * seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}
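
/*
 * Summary note: the merge hooks above only look for front merges, keyed by
 * the bio's ending sector in the per-process sort_list, while
 * cfq_allow_merge() below additionally refuses to merge a bio into a
 * request that belongs to a different process's cfqq.
 */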
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
		cfq_clear_cfqq_queue_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq))
		cfqq->slice_resid = cfqq->slice_end - jiffies;

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}
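
/*
 * Note: cfq_close_cooperator() below is currently a stub. Detecting queues
 * that work on nearby areas of the disk (so they could share a slice
 * instead of idling between them) is left unimplemented, hence it always
 * returns 0.
 */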
static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		return NULL;

	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
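
/*
 * Worked example (illustrative): with the default cfq_slice_async_rq of 2
 * and CFQ_PRIO_LISTS == 8, cfq_prio_to_maxrq() allows an async queue at
 * ioprio 7 up to 2 * (2 + 2*0) = 4 requests per slice, ioprio 4 up to
 * 2 * (2 + 2*3) = 16, and ioprio 0 up to 2 * (2 + 2*7) = 32.
 */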

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq))
		goto expire;

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		rq = cfq_check_fifo(cfqq);
		if (rq == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice.
	 * idle queues always expire after one dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	return dispatched;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}

static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		if (cfqq->dispatched >= max_dispatch) {
			if (cfqd->busy_queues > 1)
				break;
			if (cfqq->dispatched >= 4 * max_dispatch)
				break;
		}

		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
			break;

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	return dispatched;
}
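
/*
 * Note on cfq_dispatch_requests() above: when only one queue is busy it
 * may keep dispatching up to 4 * cfq_quantum requests before we stop
 * feeding the driver; with multiple busy queues each is capped at
 * cfq_quantum (1 for the idle class) per round.
 */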

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}

/*
 * Call func for each cic attached to this ioc. Returns number of cic's seen.
 */
static unsigned int
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cic;
	struct hlist_node *n;
	int called = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) {
		func(ioc, cic);
		called++;
	}
	rcu_read_unlock();

	return called;
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long flags;

	BUG_ON(!cic->dead_key);

	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, cic->dead_key);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(cfq_ioc_pool, cic);
}

static void cfq_free_io_context(struct io_context *ioc)
{
	int freed;

	/*
	 * ioc->refcount is zero here, so no more cic's are allowed to be
	 * linked into this ioc. So it should be ok to iterate over the known
	 * list, we will see all cic's since no new ones are added.
	 */
	freed = call_for_each_cic(ioc, cic_free_func);

	elv_ioc_count_mod(ioc_count, -freed);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	list_del_init(&cic->queue_list);

	/*
	 * Make sure key == NULL is seen for dead queues
	 */
	smp_wmb();
	cic->dead_key = (unsigned long) cic->key;
	cic->key = NULL;

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__cfq_exit_single_io_context(cfqd, cic);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
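
/*
 * Informal note on the ->key / ->dead_key pair used above: when a queue
 * goes away, __cfq_exit_single_io_context() NULLs cic->key (the cfqd
 * pointer doubles as the radix tree index) but stashes the old value in
 * cic->dead_key, so cic_free_func() can still delete the stale radix
 * tree entry later.
 */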

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	rcu_assign_pointer(ioc->ioc_data, NULL);
	call_for_each_cic(ioc, cfq_exit_single_io_context);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
				    cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, place us in the middle of the BE classes
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	call_for_each_cic(ioc, changed_ioprio);
	ioc->ioprio_changed = 0;
}
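
/*
 * Note on cfq_find_alloc_queue() below: for __GFP_WAIT callers it may drop
 * the queue lock, allocate a new cfqq with __GFP_NOFAIL, and then retry
 * the lookup, since a cfqq may have been installed for this cic while the
 * lock was dropped.
 */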
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;

retry:
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
					cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
					gfp_mask | __GFP_ZERO,
					cfqd->queue->node);
			if (!cfqq)
				goto out;
		}

		RB_CLEAR_NODE(&cfqq->rb_node);
		INIT_LIST_HEAD(&cfqq->fifo);

		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;

		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);

		cfq_init_prio_data(cfqq, ioc);

		if (is_sync) {
			if (!cfq_class_idle(cfqq))
				cfq_mark_cfqq_idle_window(cfqq);
			cfq_mark_cfqq_sync(cfqq);
		}
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq) {
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
		if (!cfqq)
			return NULL;
	}

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		atomic_inc(&cfqq->ref);
		*async_cfqq = cfqq;
	}

	atomic_inc(&cfqq->ref);
	return cfqq;
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}
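
/*
 * Informal note for the lookup below: cfq_ioc_pool is created with
 * SLAB_DESTROY_BY_RCU (see cfq_slab_setup()), so a cic found under
 * rcu_read_lock() may be a recycled object; the ->key check against
 * cfqd is what validates the hit.
 */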

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
		  struct cfq_io_context *cic)
{
	unsigned long flags;

	WARN_ON(!list_empty(&cic->queue_list));

	spin_lock_irqsave(&ioc->lock, flags);

	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}

static struct cfq_io_context *
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct cfq_io_context *cic;
	void *k;

	if (unlikely(!ioc))
		return NULL;

	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	cic = rcu_dereference(ioc->ioc_data);
	if (cic && cic->key == cfqd)
		return cic;

	do {
		rcu_read_lock();
		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
		rcu_read_unlock();
		if (!cic)
			break;
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(cfqd, ioc, cic);
			continue;
		}

		rcu_assign_pointer(ioc->ioc_data, cic);
		break;
	} while (1);

	return cic;
}

/*
 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
 * the process specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret;

	ret = radix_tree_preload(gfp_mask);
	if (!ret) {
		cic->ioc = ioc;
		cic->key = cfqd;

		spin_lock_irqsave(&ioc->lock, flags);
		ret = radix_tree_insert(&ioc->radix_root,
					(unsigned long) cfqd, cic);
		if (!ret)
			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
		spin_unlock_irqrestore(&ioc->lock, flags);

		radix_tree_preload_end();

		if (!ret) {
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &cfqd->cic_list);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
		}
	}

	if (ret)
		printk(KERN_ERR "cfq: cic link failed!\n");

	return ret;
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
		goto err_free;

out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

	return cic;
err_free:
	cfq_cic_free(cic);
err:
	put_io_context(ioc);
	return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - rq->sector;

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second & third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}
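
/*
 * Note on the statistics above: think time and seek distance are kept as
 * fixed-point exponentially weighted moving averages. Each update decays
 * the previous samples/total by 7/8 and adds the new observation scaled
 * by 256, so e.g. ttime_mean = (ttime_total + 128) / ttime_samples is a
 * rounded estimate of the recent per-request think time in jiffies.
 */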

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no (or if we aren't sure), 1 to cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			blk_start_queueing(cfqd->queue);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		blk_start_queueing(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}
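
/*
 * Note on cfq_completed_request() below: a queue's time slice is only
 * stamped (cfq_set_prio_slice()) on completion of its first request, so
 * slice timing excludes the initial dispatch latency; see also the
 * comment above cfq_slice_used().
 */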
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
			cfq_arm_slice_timer(cfqd);
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic->ioc);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);

		if (!cfqq)
			goto queue_fail;

		cic_set_cfqq(cic, cfqq, is_sync);
	}

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}

static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	kblockd_flush_work(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}
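
/*
 * Note on the teardown below: cfq_exit_queue() calls
 * cfq_shutdown_timer_wq() both before and after dropping the cic list,
 * since exiting the remaining io contexts under the queue lock can
 * requeue the unplug work via cfq_schedule_dispatch().
 */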
static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd);
}

static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->last_end_request = jiffies;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;

	return cfqd;
}

static void cfq_slab_kill(void)
{
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION
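
/*
 * Usage note (illustrative): once cfq is the active elevator, these
 * attributes appear under /sys/block/<dev>/queue/iosched/; for example
 * 'echo 2 > /sys/block/sda/queue/iosched/slice_idle' (path given for
 * illustration) stores 2ms, converted to jiffies by the __CONV paths
 * above and below.
 */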
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);

	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");