cfq-iosched.c revision 1792669cc1acc2069869b7ca41a0195240de05e0
1/* 2 * CFQ, or complete fairness queueing, disk scheduler. 3 * 4 * Based on ideas from a previously unfinished io 5 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli. 6 * 7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> 8 */ 9#include <linux/module.h> 10#include <linux/blkdev.h> 11#include <linux/elevator.h> 12#include <linux/hash.h> 13#include <linux/rbtree.h> 14#include <linux/ioprio.h> 15 16/* 17 * tunables 18 */ 19static const int cfq_quantum = 4; /* max queue in one round of service */ 20static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; 21static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ 22static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ 23 24static const int cfq_slice_sync = HZ / 10; 25static int cfq_slice_async = HZ / 25; 26static const int cfq_slice_async_rq = 2; 27static int cfq_slice_idle = HZ / 125; 28 29#define CFQ_IDLE_GRACE (HZ / 10) 30#define CFQ_SLICE_SCALE (5) 31 32#define CFQ_KEY_ASYNC (0) 33 34/* 35 * for the hash of cfqq inside the cfqd 36 */ 37#define CFQ_QHASH_SHIFT 6 38#define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT) 39#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash) 40 41#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list) 42 43#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) 44#define RQ_CFQQ(rq) ((rq)->elevator_private2) 45 46static struct kmem_cache *cfq_pool; 47static struct kmem_cache *cfq_ioc_pool; 48 49static DEFINE_PER_CPU(unsigned long, ioc_count); 50static struct completion *ioc_gone; 51 52#define CFQ_PRIO_LISTS IOPRIO_BE_NR 53#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) 54#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) 55 56#define ASYNC (0) 57#define SYNC (1) 58 59#define cfq_cfqq_dispatched(cfqq) \ 60 ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC]) 61 62#define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC) 63 64#define cfq_cfqq_sync(cfqq) \ 65 (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC]) 66 67#define sample_valid(samples) ((samples) > 80) 68 69/* 70 * Per block device queue structure 71 */ 72struct cfq_data { 73 request_queue_t *queue; 74 75 /* 76 * rr list of queues with requests and the count of them 77 */ 78 struct list_head rr_list[CFQ_PRIO_LISTS]; 79 struct list_head busy_rr; 80 struct list_head cur_rr; 81 struct list_head idle_rr; 82 unsigned int busy_queues; 83 84 /* 85 * cfqq lookup hash 86 */ 87 struct hlist_head *cfq_hash; 88 89 int rq_in_driver; 90 int hw_tag; 91 92 /* 93 * idle window management 94 */ 95 struct timer_list idle_slice_timer; 96 struct work_struct unplug_work; 97 98 struct cfq_queue *active_queue; 99 struct cfq_io_context *active_cic; 100 int cur_prio, cur_end_prio; 101 unsigned int dispatch_slice; 102 103 struct timer_list idle_class_timer; 104 105 sector_t last_sector; 106 unsigned long last_end_request; 107 108 /* 109 * tunables, see top of file 110 */ 111 unsigned int cfq_quantum; 112 unsigned int cfq_fifo_expire[2]; 113 unsigned int cfq_back_penalty; 114 unsigned int cfq_back_max; 115 unsigned int cfq_slice[2]; 116 unsigned int cfq_slice_async_rq; 117 unsigned int cfq_slice_idle; 118 119 struct list_head cic_list; 120}; 121 122/* 123 * Per process-grouping structure 124 */ 125struct cfq_queue { 126 /* reference count */ 127 atomic_t ref; 128 /* parent cfq_data */ 129 struct cfq_data *cfqd; 130 /* cfqq lookup hash */ 131 struct hlist_node cfq_hash; 132 /* hash key */ 133 unsigned int 
key;
	/* member of the rr/busy/cur/idle cfqd list */
	struct list_head cfq_list;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	unsigned long service_last;
	long slice_resid;

	/* number of requests that are on the dispatch list */
	int on_dispatch[2];

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
};

#define CFQ_CFQQ_FNS(name) \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
{ \
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
{ \
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
{ \
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
#undef CFQ_CFQQ_FNS

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_insert(request_queue_t *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
{
	/*
	 * Use the per-process queue, for read requests and synchronous writes
	 */
	if (!(rw & REQ_RW) || is_sync)
		return task->pid;

	return CFQ_KEY_ASYNC;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
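 * As a worked example (assuming HZ=1000, which is not guaranteed), the default
 * sync base slice cfq_slice_sync = HZ/10 is about 100ms, so cfq_prio_to_slice()
 * below gives a best-effort prio 0 queue roughly 100 + 20*4 = 180ms per slice,
 * the default prio 4 roughly 100ms, and prio 7 roughly 40ms.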
239 */ 240static inline int 241cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 242{ 243 const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)]; 244 245 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 246 247 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio)); 248} 249 250static inline void 251cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 252{ 253 cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; 254 cfqq->slice_end += cfqq->slice_resid; 255 256 /* 257 * Don't carry over residual for more than one slice, we only want 258 * to slightly correct the fairness. Carrying over forever would 259 * easily introduce oscillations. 260 */ 261 cfqq->slice_resid = 0; 262} 263 264/* 265 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end 266 * isn't valid until the first request from the dispatch is activated 267 * and the slice time set. 268 */ 269static inline int cfq_slice_used(struct cfq_queue *cfqq) 270{ 271 if (cfq_cfqq_slice_new(cfqq)) 272 return 0; 273 if (time_before(jiffies, cfqq->slice_end)) 274 return 0; 275 276 return 1; 277} 278 279/* 280 * Lifted from AS - choose which of rq1 and rq2 that is best served now. 281 * We choose the request that is closest to the head right now. Distance 282 * behind the head is penalized and only allowed to a certain extent. 283 */ 284static struct request * 285cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2) 286{ 287 sector_t last, s1, s2, d1 = 0, d2 = 0; 288 unsigned long back_max; 289#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ 290#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ 291 unsigned wrap = 0; /* bit mask: requests behind the disk head? */ 292 293 if (rq1 == NULL || rq1 == rq2) 294 return rq2; 295 if (rq2 == NULL) 296 return rq1; 297 298 if (rq_is_sync(rq1) && !rq_is_sync(rq2)) 299 return rq1; 300 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 301 return rq2; 302 if (rq_is_meta(rq1) && !rq_is_meta(rq2)) 303 return rq1; 304 else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 305 return rq2; 306 307 s1 = rq1->sector; 308 s2 = rq2->sector; 309 310 last = cfqd->last_sector; 311 312 /* 313 * by definition, 1KiB is 2 sectors 314 */ 315 back_max = cfqd->cfq_back_max * 2; 316 317 /* 318 * Strict one way elevator _except_ in the case where we allow 319 * short backward seeks which are biased as twice the cost of a 320 * similar forward seek. 321 */ 322 if (s1 >= last) 323 d1 = s1 - last; 324 else if (s1 + back_max >= last) 325 d1 = (last - s1) * cfqd->cfq_back_penalty; 326 else 327 wrap |= CFQ_RQ1_WRAP; 328 329 if (s2 >= last) 330 d2 = s2 - last; 331 else if (s2 + back_max >= last) 332 d2 = (last - s2) * cfqd->cfq_back_penalty; 333 else 334 wrap |= CFQ_RQ2_WRAP; 335 336 /* Found required data */ 337 338 /* 339 * By doing switch() on the bit mask "wrap" we avoid having to 340 * check two variables for all permutations: --> faster! 341 */ 342 switch (wrap) { 343 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ 344 if (d1 < d2) 345 return rq1; 346 else if (d2 < d1) 347 return rq2; 348 else { 349 if (s1 >= s2) 350 return rq1; 351 else 352 return rq2; 353 } 354 355 case CFQ_RQ2_WRAP: 356 return rq1; 357 case CFQ_RQ1_WRAP: 358 return rq2; 359 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */ 360 default: 361 /* 362 * Since both rqs are wrapped, 363 * start with the one that's further behind head 364 * (--> only *one* back seek required), 365 * since back seek takes more time than forward. 
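		 * e.g. if the head sits at sector 100000 and both pending
		 * requests (say at sectors 200 and 50000) lie further back
		 * than the allowed backward window, serving sector 200 first
		 * lets sector 50000 then be reached with a forward seek.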
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct list_head *list, *n;
	struct cfq_queue *__cfqq;

	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (!cfq_cfqq_on_rr(cfqq))
		return;

	list_del(&cfqq->cfq_list);

	if (cfq_class_rt(cfqq))
		list = &cfqd->cur_rr;
	else if (cfq_class_idle(cfqq))
		list = &cfqd->idle_rr;
	else {
		/*
		 * if cfqq has requests in flight, don't allow it to be
		 * found in cfq_set_active_queue before it has finished them.
		 * this is done to increase fairness between a process that
		 * has lots of io pending vs one that only generates one
		 * sporadically or synchronously
		 */
		if (cfq_cfqq_dispatched(cfqq))
			list = &cfqd->busy_rr;
		else
			list = &cfqd->rr_list[cfqq->ioprio];
	}

	if (preempted || cfq_cfqq_queue_new(cfqq)) {
		/*
		 * If this queue was preempted or is new (never been serviced),
		 * let it be added first for fairness but behind other new
		 * queues.
		 */
		n = list;
		while (n->next != list) {
			__cfqq = list_entry_cfqq(n->next);
			if (!cfq_cfqq_queue_new(__cfqq))
				break;

			n = n->next;
		}
		list_add_tail(&cfqq->cfq_list, n);
	} else if (!cfq_cfqq_class_sync(cfqq)) {
		/*
		 * async queue always goes to the end. this won't be overly
		 * unfair to writes, as the sort of the sync queue won't be
		 * allowed to pass the async queue again.
		 */
		list_add_tail(&cfqq->cfq_list, list);
	} else {
		/*
		 * sort by last service, but don't cross a new or async
		 * queue. we don't cross a new queue because it hasn't been
		 * serviced before, and we don't cross an async queue because
		 * it gets added to the end on expire.
461 */ 462 n = list; 463 while ((n = n->prev) != list) { 464 struct cfq_queue *__cfqq = list_entry_cfqq(n); 465 466 if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last) 467 break; 468 if (time_before(__cfqq->service_last, cfqq->service_last)) 469 break; 470 } 471 list_add(&cfqq->cfq_list, n); 472 } 473} 474 475/* 476 * add to busy list of queues for service, trying to be fair in ordering 477 * the pending list according to last request service 478 */ 479static inline void 480cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 481{ 482 BUG_ON(cfq_cfqq_on_rr(cfqq)); 483 cfq_mark_cfqq_on_rr(cfqq); 484 cfqd->busy_queues++; 485 486 cfq_resort_rr_list(cfqq, 0); 487} 488 489static inline void 490cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 491{ 492 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 493 cfq_clear_cfqq_on_rr(cfqq); 494 list_del_init(&cfqq->cfq_list); 495 496 BUG_ON(!cfqd->busy_queues); 497 cfqd->busy_queues--; 498} 499 500/* 501 * rb tree support functions 502 */ 503static inline void cfq_del_rq_rb(struct request *rq) 504{ 505 struct cfq_queue *cfqq = RQ_CFQQ(rq); 506 struct cfq_data *cfqd = cfqq->cfqd; 507 const int sync = rq_is_sync(rq); 508 509 BUG_ON(!cfqq->queued[sync]); 510 cfqq->queued[sync]--; 511 512 elv_rb_del(&cfqq->sort_list, rq); 513 514 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) 515 cfq_del_cfqq_rr(cfqd, cfqq); 516} 517 518static void cfq_add_rq_rb(struct request *rq) 519{ 520 struct cfq_queue *cfqq = RQ_CFQQ(rq); 521 struct cfq_data *cfqd = cfqq->cfqd; 522 struct request *__alias; 523 524 cfqq->queued[rq_is_sync(rq)]++; 525 526 /* 527 * looks a little odd, but the first insert might return an alias. 528 * if that happens, put the alias on the dispatch list 529 */ 530 while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL) 531 cfq_dispatch_insert(cfqd->queue, __alias); 532 533 if (!cfq_cfqq_on_rr(cfqq)) 534 cfq_add_cfqq_rr(cfqd, cfqq); 535} 536 537static inline void 538cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) 539{ 540 elv_rb_del(&cfqq->sort_list, rq); 541 cfqq->queued[rq_is_sync(rq)]--; 542 cfq_add_rq_rb(rq); 543} 544 545static struct request * 546cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) 547{ 548 struct task_struct *tsk = current; 549 pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio)); 550 struct cfq_queue *cfqq; 551 552 cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio); 553 if (cfqq) { 554 sector_t sector = bio->bi_sector + bio_sectors(bio); 555 556 return elv_rb_find(&cfqq->sort_list, sector); 557 } 558 559 return NULL; 560} 561 562static void cfq_activate_request(request_queue_t *q, struct request *rq) 563{ 564 struct cfq_data *cfqd = q->elevator->elevator_data; 565 566 cfqd->rq_in_driver++; 567 568 /* 569 * If the depth is larger 1, it really could be queueing. But lets 570 * make the mark a little higher - idling could still be good for 571 * low queueing, and a low queueing number could also just indicate 572 * a SCSI mid layer like behaviour where limit+1 is often seen. 
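 * (as a simple heuristic, hw_tag is only set once more than 4 requests have
 * been observed in the driver at the same time)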
573 */ 574 if (!cfqd->hw_tag && cfqd->rq_in_driver > 4) 575 cfqd->hw_tag = 1; 576} 577 578static void cfq_deactivate_request(request_queue_t *q, struct request *rq) 579{ 580 struct cfq_data *cfqd = q->elevator->elevator_data; 581 582 WARN_ON(!cfqd->rq_in_driver); 583 cfqd->rq_in_driver--; 584} 585 586static void cfq_remove_request(struct request *rq) 587{ 588 struct cfq_queue *cfqq = RQ_CFQQ(rq); 589 590 if (cfqq->next_rq == rq) 591 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); 592 593 list_del_init(&rq->queuelist); 594 cfq_del_rq_rb(rq); 595 596 if (rq_is_meta(rq)) { 597 WARN_ON(!cfqq->meta_pending); 598 cfqq->meta_pending--; 599 } 600} 601 602static int 603cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) 604{ 605 struct cfq_data *cfqd = q->elevator->elevator_data; 606 struct request *__rq; 607 608 __rq = cfq_find_rq_fmerge(cfqd, bio); 609 if (__rq && elv_rq_merge_ok(__rq, bio)) { 610 *req = __rq; 611 return ELEVATOR_FRONT_MERGE; 612 } 613 614 return ELEVATOR_NO_MERGE; 615} 616 617static void cfq_merged_request(request_queue_t *q, struct request *req, 618 int type) 619{ 620 if (type == ELEVATOR_FRONT_MERGE) { 621 struct cfq_queue *cfqq = RQ_CFQQ(req); 622 623 cfq_reposition_rq_rb(cfqq, req); 624 } 625} 626 627static void 628cfq_merged_requests(request_queue_t *q, struct request *rq, 629 struct request *next) 630{ 631 /* 632 * reposition in fifo if next is older than rq 633 */ 634 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 635 time_before(next->start_time, rq->start_time)) 636 list_move(&rq->queuelist, &next->queuelist); 637 638 cfq_remove_request(next); 639} 640 641static int cfq_allow_merge(request_queue_t *q, struct request *rq, 642 struct bio *bio) 643{ 644 struct cfq_data *cfqd = q->elevator->elevator_data; 645 const int rw = bio_data_dir(bio); 646 struct cfq_queue *cfqq; 647 pid_t key; 648 649 /* 650 * Disallow merge of a sync bio into an async request. 651 */ 652 if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq)) 653 return 0; 654 655 /* 656 * Lookup the cfqq that this bio will be queued with. Allow 657 * merge only if rq is queued there. 
658 */ 659 key = cfq_queue_pid(current, rw, bio_sync(bio)); 660 cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio); 661 662 if (cfqq == RQ_CFQQ(rq)) 663 return 1; 664 665 return 0; 666} 667 668static inline void 669__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) 670{ 671 if (cfqq) { 672 /* 673 * stop potential idle class queues waiting service 674 */ 675 del_timer(&cfqd->idle_class_timer); 676 677 cfqq->slice_end = 0; 678 cfq_clear_cfqq_must_alloc_slice(cfqq); 679 cfq_clear_cfqq_fifo_expire(cfqq); 680 cfq_mark_cfqq_slice_new(cfqq); 681 } 682 683 cfqd->active_queue = cfqq; 684} 685 686/* 687 * current cfqq expired its slice (or was too idle), select new one 688 */ 689static void 690__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, 691 int preempted) 692{ 693 if (cfq_cfqq_wait_request(cfqq)) 694 del_timer(&cfqd->idle_slice_timer); 695 696 if (!preempted && !cfq_cfqq_dispatched(cfqq)) 697 cfq_schedule_dispatch(cfqd); 698 699 cfq_clear_cfqq_must_dispatch(cfqq); 700 cfq_clear_cfqq_wait_request(cfqq); 701 cfq_clear_cfqq_queue_new(cfqq); 702 703 /* 704 * store what was left of this slice, if the queue idled out 705 * or was preempted 706 */ 707 if (!cfq_cfqq_slice_new(cfqq)) 708 cfqq->slice_resid = cfqq->slice_end - jiffies; 709 710 cfq_resort_rr_list(cfqq, preempted); 711 712 if (cfqq == cfqd->active_queue) 713 cfqd->active_queue = NULL; 714 715 if (cfqd->active_cic) { 716 put_io_context(cfqd->active_cic->ioc); 717 cfqd->active_cic = NULL; 718 } 719 720 cfqd->dispatch_slice = 0; 721} 722 723static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted) 724{ 725 struct cfq_queue *cfqq = cfqd->active_queue; 726 727 if (cfqq) 728 __cfq_slice_expired(cfqd, cfqq, preempted); 729} 730 731/* 732 * 0 733 * 0,1 734 * 0,1,2 735 * 0,1,2,3 736 * 0,1,2,3,4 737 * 0,1,2,3,4,5 738 * 0,1,2,3,4,5,6 739 * 0,1,2,3,4,5,6,7 740 */ 741static int cfq_get_next_prio_level(struct cfq_data *cfqd) 742{ 743 int prio, wrap; 744 745 prio = -1; 746 wrap = 0; 747 do { 748 int p; 749 750 for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) { 751 if (!list_empty(&cfqd->rr_list[p])) { 752 prio = p; 753 break; 754 } 755 } 756 757 if (prio != -1) 758 break; 759 cfqd->cur_prio = 0; 760 if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) { 761 cfqd->cur_end_prio = 0; 762 if (wrap) 763 break; 764 wrap = 1; 765 } 766 } while (1); 767 768 if (unlikely(prio == -1)) 769 return -1; 770 771 BUG_ON(prio >= CFQ_PRIO_LISTS); 772 773 list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr); 774 775 cfqd->cur_prio = prio + 1; 776 if (cfqd->cur_prio > cfqd->cur_end_prio) { 777 cfqd->cur_end_prio = cfqd->cur_prio; 778 cfqd->cur_prio = 0; 779 } 780 if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) { 781 cfqd->cur_prio = 0; 782 cfqd->cur_end_prio = 0; 783 } 784 785 return prio; 786} 787 788static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) 789{ 790 struct cfq_queue *cfqq = NULL; 791 792 if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) { 793 /* 794 * if current list is non-empty, grab first entry. if it is 795 * empty, get next prio level and grab first entry then if any 796 * are spliced 797 */ 798 cfqq = list_entry_cfqq(cfqd->cur_rr.next); 799 } else if (!list_empty(&cfqd->busy_rr)) { 800 /* 801 * If no new queues are available, check if the busy list has 802 * some before falling back to idle io. 
803 */ 804 cfqq = list_entry_cfqq(cfqd->busy_rr.next); 805 } else if (!list_empty(&cfqd->idle_rr)) { 806 /* 807 * if we have idle queues and no rt or be queues had pending 808 * requests, either allow immediate service if the grace period 809 * has passed or arm the idle grace timer 810 */ 811 unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE; 812 813 if (time_after_eq(jiffies, end)) 814 cfqq = list_entry_cfqq(cfqd->idle_rr.next); 815 else 816 mod_timer(&cfqd->idle_class_timer, end); 817 } 818 819 __cfq_set_active_queue(cfqd, cfqq); 820 return cfqq; 821} 822 823#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024)) 824 825static int cfq_arm_slice_timer(struct cfq_data *cfqd) 826{ 827 struct cfq_queue *cfqq = cfqd->active_queue; 828 struct cfq_io_context *cic; 829 unsigned long sl; 830 831 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); 832 833 /* 834 * idle is disabled, either manually or by past process history 835 */ 836 if (!cfqd->cfq_slice_idle) 837 return 0; 838 if (!cfq_cfqq_idle_window(cfqq)) 839 return 0; 840 /* 841 * task has exited, don't wait 842 */ 843 cic = cfqd->active_cic; 844 if (!cic || !cic->ioc->task) 845 return 0; 846 847 cfq_mark_cfqq_must_dispatch(cfqq); 848 cfq_mark_cfqq_wait_request(cfqq); 849 850 sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle); 851 852 /* 853 * we don't want to idle for seeks, but we do want to allow 854 * fair distribution of slice time for a process doing back-to-back 855 * seeks. so allow a little bit of time for him to submit a new rq 856 */ 857 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) 858 sl = min(sl, msecs_to_jiffies(2)); 859 860 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 861 return 1; 862} 863 864static void cfq_dispatch_insert(request_queue_t *q, struct request *rq) 865{ 866 struct cfq_data *cfqd = q->elevator->elevator_data; 867 struct cfq_queue *cfqq = RQ_CFQQ(rq); 868 869 cfq_remove_request(rq); 870 cfqq->on_dispatch[rq_is_sync(rq)]++; 871 elv_dispatch_sort(q, rq); 872 873 rq = list_entry(q->queue_head.prev, struct request, queuelist); 874 cfqd->last_sector = rq->sector + rq->nr_sectors; 875} 876 877/* 878 * return expired entry, or NULL to just start from scratch in rbtree 879 */ 880static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq) 881{ 882 struct cfq_data *cfqd = cfqq->cfqd; 883 struct request *rq; 884 int fifo; 885 886 if (cfq_cfqq_fifo_expire(cfqq)) 887 return NULL; 888 if (list_empty(&cfqq->fifo)) 889 return NULL; 890 891 fifo = cfq_cfqq_class_sync(cfqq); 892 rq = rq_entry_fifo(cfqq->fifo.next); 893 894 if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) { 895 cfq_mark_cfqq_fifo_expire(cfqq); 896 return rq; 897 } 898 899 return NULL; 900} 901 902static inline int 903cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 904{ 905 const int base_rq = cfqd->cfq_slice_async_rq; 906 907 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 908 909 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); 910} 911 912/* 913 * get next queue for service 914 */ 915static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) 916{ 917 struct cfq_queue *cfqq; 918 919 cfqq = cfqd->active_queue; 920 if (!cfqq) 921 goto new_queue; 922 923 /* 924 * slice has expired 925 */ 926 if (!cfq_cfqq_must_dispatch(cfqq) && cfq_slice_used(cfqq)) 927 goto expire; 928 929 /* 930 * if queue has requests, dispatch one. 
if not, check if 931 * enough slice is left to wait for one 932 */ 933 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 934 goto keep_queue; 935 else if (cfq_cfqq_slice_new(cfqq) || cfq_cfqq_dispatched(cfqq)) { 936 cfqq = NULL; 937 goto keep_queue; 938 } else if (cfq_cfqq_class_sync(cfqq)) { 939 if (cfq_arm_slice_timer(cfqd)) 940 return NULL; 941 } 942 943expire: 944 cfq_slice_expired(cfqd, 0); 945new_queue: 946 cfqq = cfq_set_active_queue(cfqd); 947keep_queue: 948 return cfqq; 949} 950 951static int 952__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, 953 int max_dispatch) 954{ 955 int dispatched = 0; 956 957 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); 958 959 do { 960 struct request *rq; 961 962 /* 963 * follow expired path, else get first next available 964 */ 965 if ((rq = cfq_check_fifo(cfqq)) == NULL) 966 rq = cfqq->next_rq; 967 968 /* 969 * finally, insert request into driver dispatch list 970 */ 971 cfq_dispatch_insert(cfqd->queue, rq); 972 973 cfqd->dispatch_slice++; 974 dispatched++; 975 976 if (!cfqd->active_cic) { 977 atomic_inc(&RQ_CIC(rq)->ioc->refcount); 978 cfqd->active_cic = RQ_CIC(rq); 979 } 980 981 if (RB_EMPTY_ROOT(&cfqq->sort_list)) 982 break; 983 984 } while (dispatched < max_dispatch); 985 986 /* 987 * expire an async queue immediately if it has used up its slice. idle 988 * queue always expire after 1 dispatch round. 989 */ 990 if ((!cfq_cfqq_sync(cfqq) && 991 cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) || 992 cfq_class_idle(cfqq)) { 993 cfqq->slice_end = jiffies + 1; 994 cfq_slice_expired(cfqd, 0); 995 } 996 997 return dispatched; 998} 999 1000static int 1001cfq_forced_dispatch_cfqqs(struct list_head *list) 1002{ 1003 struct cfq_queue *cfqq, *next; 1004 int dispatched; 1005 1006 dispatched = 0; 1007 list_for_each_entry_safe(cfqq, next, list, cfq_list) { 1008 while (cfqq->next_rq) { 1009 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); 1010 dispatched++; 1011 } 1012 BUG_ON(!list_empty(&cfqq->fifo)); 1013 } 1014 1015 return dispatched; 1016} 1017 1018static int 1019cfq_forced_dispatch(struct cfq_data *cfqd) 1020{ 1021 int i, dispatched = 0; 1022 1023 for (i = 0; i < CFQ_PRIO_LISTS; i++) 1024 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]); 1025 1026 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr); 1027 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr); 1028 dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr); 1029 1030 cfq_slice_expired(cfqd, 0); 1031 1032 BUG_ON(cfqd->busy_queues); 1033 1034 return dispatched; 1035} 1036 1037static int 1038cfq_dispatch_requests(request_queue_t *q, int force) 1039{ 1040 struct cfq_data *cfqd = q->elevator->elevator_data; 1041 struct cfq_queue *cfqq, *prev_cfqq; 1042 int dispatched; 1043 1044 if (!cfqd->busy_queues) 1045 return 0; 1046 1047 if (unlikely(force)) 1048 return cfq_forced_dispatch(cfqd); 1049 1050 dispatched = 0; 1051 prev_cfqq = NULL; 1052 while ((cfqq = cfq_select_queue(cfqd)) != NULL) { 1053 int max_dispatch; 1054 1055 /* 1056 * Don't repeat dispatch from the previous queue. 1057 */ 1058 if (prev_cfqq == cfqq) 1059 break; 1060 1061 cfq_clear_cfqq_must_dispatch(cfqq); 1062 cfq_clear_cfqq_wait_request(cfqq); 1063 del_timer(&cfqd->idle_slice_timer); 1064 1065 max_dispatch = cfqd->cfq_quantum; 1066 if (cfq_class_idle(cfqq)) 1067 max_dispatch = 1; 1068 1069 dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); 1070 1071 /* 1072 * If the dispatch cfqq has idling enabled and is still 1073 * the active queue, break out. 
1074 */ 1075 if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue) 1076 break; 1077 1078 prev_cfqq = cfqq; 1079 } 1080 1081 return dispatched; 1082} 1083 1084/* 1085 * task holds one reference to the queue, dropped when task exits. each rq 1086 * in-flight on this queue also holds a reference, dropped when rq is freed. 1087 * 1088 * queue lock must be held here. 1089 */ 1090static void cfq_put_queue(struct cfq_queue *cfqq) 1091{ 1092 struct cfq_data *cfqd = cfqq->cfqd; 1093 1094 BUG_ON(atomic_read(&cfqq->ref) <= 0); 1095 1096 if (!atomic_dec_and_test(&cfqq->ref)) 1097 return; 1098 1099 BUG_ON(rb_first(&cfqq->sort_list)); 1100 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); 1101 BUG_ON(cfq_cfqq_on_rr(cfqq)); 1102 1103 if (unlikely(cfqd->active_queue == cfqq)) 1104 __cfq_slice_expired(cfqd, cfqq, 0); 1105 1106 /* 1107 * it's on the empty list and still hashed 1108 */ 1109 list_del(&cfqq->cfq_list); 1110 hlist_del(&cfqq->cfq_hash); 1111 kmem_cache_free(cfq_pool, cfqq); 1112} 1113 1114static struct cfq_queue * 1115__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio, 1116 const int hashval) 1117{ 1118 struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; 1119 struct hlist_node *entry; 1120 struct cfq_queue *__cfqq; 1121 1122 hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) { 1123 const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio); 1124 1125 if (__cfqq->key == key && (__p == prio || !prio)) 1126 return __cfqq; 1127 } 1128 1129 return NULL; 1130} 1131 1132static struct cfq_queue * 1133cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio) 1134{ 1135 return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT)); 1136} 1137 1138static void cfq_free_io_context(struct io_context *ioc) 1139{ 1140 struct cfq_io_context *__cic; 1141 struct rb_node *n; 1142 int freed = 0; 1143 1144 while ((n = rb_first(&ioc->cic_root)) != NULL) { 1145 __cic = rb_entry(n, struct cfq_io_context, rb_node); 1146 rb_erase(&__cic->rb_node, &ioc->cic_root); 1147 kmem_cache_free(cfq_ioc_pool, __cic); 1148 freed++; 1149 } 1150 1151 elv_ioc_count_mod(ioc_count, -freed); 1152 1153 if (ioc_gone && !elv_ioc_count_read(ioc_count)) 1154 complete(ioc_gone); 1155} 1156 1157static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1158{ 1159 if (unlikely(cfqq == cfqd->active_queue)) 1160 __cfq_slice_expired(cfqd, cfqq, 0); 1161 1162 cfq_put_queue(cfqq); 1163} 1164 1165static void __cfq_exit_single_io_context(struct cfq_data *cfqd, 1166 struct cfq_io_context *cic) 1167{ 1168 list_del_init(&cic->queue_list); 1169 smp_wmb(); 1170 cic->key = NULL; 1171 1172 if (cic->cfqq[ASYNC]) { 1173 cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]); 1174 cic->cfqq[ASYNC] = NULL; 1175 } 1176 1177 if (cic->cfqq[SYNC]) { 1178 cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]); 1179 cic->cfqq[SYNC] = NULL; 1180 } 1181} 1182 1183 1184/* 1185 * Called with interrupts disabled 1186 */ 1187static void cfq_exit_single_io_context(struct cfq_io_context *cic) 1188{ 1189 struct cfq_data *cfqd = cic->key; 1190 1191 if (cfqd) { 1192 request_queue_t *q = cfqd->queue; 1193 1194 spin_lock_irq(q->queue_lock); 1195 __cfq_exit_single_io_context(cfqd, cic); 1196 spin_unlock_irq(q->queue_lock); 1197 } 1198} 1199 1200static void cfq_exit_io_context(struct io_context *ioc) 1201{ 1202 struct cfq_io_context *__cic; 1203 struct rb_node *n; 1204 1205 /* 1206 * put the reference this task is holding to the various queues 1207 */ 1208 1209 n = rb_first(&ioc->cic_root); 1210 
while (n != NULL) { 1211 __cic = rb_entry(n, struct cfq_io_context, rb_node); 1212 1213 cfq_exit_single_io_context(__cic); 1214 n = rb_next(n); 1215 } 1216} 1217 1218static struct cfq_io_context * 1219cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) 1220{ 1221 struct cfq_io_context *cic; 1222 1223 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node); 1224 if (cic) { 1225 memset(cic, 0, sizeof(*cic)); 1226 cic->last_end_request = jiffies; 1227 INIT_LIST_HEAD(&cic->queue_list); 1228 cic->dtor = cfq_free_io_context; 1229 cic->exit = cfq_exit_io_context; 1230 elv_ioc_count_inc(ioc_count); 1231 } 1232 1233 return cic; 1234} 1235 1236static void cfq_init_prio_data(struct cfq_queue *cfqq) 1237{ 1238 struct task_struct *tsk = current; 1239 int ioprio_class; 1240 1241 if (!cfq_cfqq_prio_changed(cfqq)) 1242 return; 1243 1244 ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio); 1245 switch (ioprio_class) { 1246 default: 1247 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 1248 case IOPRIO_CLASS_NONE: 1249 /* 1250 * no prio set, place us in the middle of the BE classes 1251 */ 1252 cfqq->ioprio = task_nice_ioprio(tsk); 1253 cfqq->ioprio_class = IOPRIO_CLASS_BE; 1254 break; 1255 case IOPRIO_CLASS_RT: 1256 cfqq->ioprio = task_ioprio(tsk); 1257 cfqq->ioprio_class = IOPRIO_CLASS_RT; 1258 break; 1259 case IOPRIO_CLASS_BE: 1260 cfqq->ioprio = task_ioprio(tsk); 1261 cfqq->ioprio_class = IOPRIO_CLASS_BE; 1262 break; 1263 case IOPRIO_CLASS_IDLE: 1264 cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 1265 cfqq->ioprio = 7; 1266 cfq_clear_cfqq_idle_window(cfqq); 1267 break; 1268 } 1269 1270 /* 1271 * keep track of original prio settings in case we have to temporarily 1272 * elevate the priority of this queue 1273 */ 1274 cfqq->org_ioprio = cfqq->ioprio; 1275 cfqq->org_ioprio_class = cfqq->ioprio_class; 1276 1277 cfq_resort_rr_list(cfqq, 0); 1278 cfq_clear_cfqq_prio_changed(cfqq); 1279} 1280 1281static inline void changed_ioprio(struct cfq_io_context *cic) 1282{ 1283 struct cfq_data *cfqd = cic->key; 1284 struct cfq_queue *cfqq; 1285 unsigned long flags; 1286 1287 if (unlikely(!cfqd)) 1288 return; 1289 1290 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1291 1292 cfqq = cic->cfqq[ASYNC]; 1293 if (cfqq) { 1294 struct cfq_queue *new_cfqq; 1295 new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task, 1296 GFP_ATOMIC); 1297 if (new_cfqq) { 1298 cic->cfqq[ASYNC] = new_cfqq; 1299 cfq_put_queue(cfqq); 1300 } 1301 } 1302 1303 cfqq = cic->cfqq[SYNC]; 1304 if (cfqq) 1305 cfq_mark_cfqq_prio_changed(cfqq); 1306 1307 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 1308} 1309 1310static void cfq_ioc_set_ioprio(struct io_context *ioc) 1311{ 1312 struct cfq_io_context *cic; 1313 struct rb_node *n; 1314 1315 ioc->ioprio_changed = 0; 1316 1317 n = rb_first(&ioc->cic_root); 1318 while (n != NULL) { 1319 cic = rb_entry(n, struct cfq_io_context, rb_node); 1320 1321 changed_ioprio(cic); 1322 n = rb_next(n); 1323 } 1324} 1325 1326static struct cfq_queue * 1327cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, 1328 gfp_t gfp_mask) 1329{ 1330 const int hashval = hash_long(key, CFQ_QHASH_SHIFT); 1331 struct cfq_queue *cfqq, *new_cfqq = NULL; 1332 unsigned short ioprio; 1333 1334retry: 1335 ioprio = tsk->ioprio; 1336 cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval); 1337 1338 if (!cfqq) { 1339 if (new_cfqq) { 1340 cfqq = new_cfqq; 1341 new_cfqq = NULL; 1342 } else if (gfp_mask & __GFP_WAIT) { 1343 /* 1344 * Inform the allocator of the fact that we will 1345 * just repeat 
this allocation if it fails, to allow 1346 * the allocator to do whatever it needs to attempt to 1347 * free memory. 1348 */ 1349 spin_unlock_irq(cfqd->queue->queue_lock); 1350 new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node); 1351 spin_lock_irq(cfqd->queue->queue_lock); 1352 goto retry; 1353 } else { 1354 cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node); 1355 if (!cfqq) 1356 goto out; 1357 } 1358 1359 memset(cfqq, 0, sizeof(*cfqq)); 1360 1361 INIT_HLIST_NODE(&cfqq->cfq_hash); 1362 INIT_LIST_HEAD(&cfqq->cfq_list); 1363 INIT_LIST_HEAD(&cfqq->fifo); 1364 1365 cfqq->key = key; 1366 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); 1367 atomic_set(&cfqq->ref, 0); 1368 cfqq->cfqd = cfqd; 1369 1370 cfq_mark_cfqq_idle_window(cfqq); 1371 cfq_mark_cfqq_prio_changed(cfqq); 1372 cfq_mark_cfqq_queue_new(cfqq); 1373 cfq_init_prio_data(cfqq); 1374 } 1375 1376 if (new_cfqq) 1377 kmem_cache_free(cfq_pool, new_cfqq); 1378 1379 atomic_inc(&cfqq->ref); 1380out: 1381 WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); 1382 return cfqq; 1383} 1384 1385static void 1386cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic) 1387{ 1388 WARN_ON(!list_empty(&cic->queue_list)); 1389 rb_erase(&cic->rb_node, &ioc->cic_root); 1390 kmem_cache_free(cfq_ioc_pool, cic); 1391 elv_ioc_count_dec(ioc_count); 1392} 1393 1394static struct cfq_io_context * 1395cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc) 1396{ 1397 struct rb_node *n; 1398 struct cfq_io_context *cic; 1399 void *k, *key = cfqd; 1400 1401restart: 1402 n = ioc->cic_root.rb_node; 1403 while (n) { 1404 cic = rb_entry(n, struct cfq_io_context, rb_node); 1405 /* ->key must be copied to avoid race with cfq_exit_queue() */ 1406 k = cic->key; 1407 if (unlikely(!k)) { 1408 cfq_drop_dead_cic(ioc, cic); 1409 goto restart; 1410 } 1411 1412 if (key < k) 1413 n = n->rb_left; 1414 else if (key > k) 1415 n = n->rb_right; 1416 else 1417 return cic; 1418 } 1419 1420 return NULL; 1421} 1422 1423static inline void 1424cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, 1425 struct cfq_io_context *cic) 1426{ 1427 struct rb_node **p; 1428 struct rb_node *parent; 1429 struct cfq_io_context *__cic; 1430 unsigned long flags; 1431 void *k; 1432 1433 cic->ioc = ioc; 1434 cic->key = cfqd; 1435 1436restart: 1437 parent = NULL; 1438 p = &ioc->cic_root.rb_node; 1439 while (*p) { 1440 parent = *p; 1441 __cic = rb_entry(parent, struct cfq_io_context, rb_node); 1442 /* ->key must be copied to avoid race with cfq_exit_queue() */ 1443 k = __cic->key; 1444 if (unlikely(!k)) { 1445 cfq_drop_dead_cic(ioc, __cic); 1446 goto restart; 1447 } 1448 1449 if (cic->key < k) 1450 p = &(*p)->rb_left; 1451 else if (cic->key > k) 1452 p = &(*p)->rb_right; 1453 else 1454 BUG(); 1455 } 1456 1457 rb_link_node(&cic->rb_node, parent, p); 1458 rb_insert_color(&cic->rb_node, &ioc->cic_root); 1459 1460 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1461 list_add(&cic->queue_list, &cfqd->cic_list); 1462 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 1463} 1464 1465/* 1466 * Setup general io context and cfq io context. There can be several cfq 1467 * io contexts per general io context, if this process is doing io to more 1468 * than one device managed by cfq. 
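 * The cic's for a task live in the io_context's rbtree, keyed by the cfq_data
 * pointer, so cfq_get_io_context() below effectively does one lookup per
 * (task, device) pair.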
1469 */ 1470static struct cfq_io_context * 1471cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) 1472{ 1473 struct io_context *ioc = NULL; 1474 struct cfq_io_context *cic; 1475 1476 might_sleep_if(gfp_mask & __GFP_WAIT); 1477 1478 ioc = get_io_context(gfp_mask, cfqd->queue->node); 1479 if (!ioc) 1480 return NULL; 1481 1482 cic = cfq_cic_rb_lookup(cfqd, ioc); 1483 if (cic) 1484 goto out; 1485 1486 cic = cfq_alloc_io_context(cfqd, gfp_mask); 1487 if (cic == NULL) 1488 goto err; 1489 1490 cfq_cic_link(cfqd, ioc, cic); 1491out: 1492 smp_read_barrier_depends(); 1493 if (unlikely(ioc->ioprio_changed)) 1494 cfq_ioc_set_ioprio(ioc); 1495 1496 return cic; 1497err: 1498 put_io_context(ioc); 1499 return NULL; 1500} 1501 1502static void 1503cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) 1504{ 1505 unsigned long elapsed = jiffies - cic->last_end_request; 1506 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); 1507 1508 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; 1509 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; 1510 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; 1511} 1512 1513static void 1514cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq) 1515{ 1516 sector_t sdist; 1517 u64 total; 1518 1519 if (cic->last_request_pos < rq->sector) 1520 sdist = rq->sector - cic->last_request_pos; 1521 else 1522 sdist = cic->last_request_pos - rq->sector; 1523 1524 /* 1525 * Don't allow the seek distance to get too large from the 1526 * odd fragment, pagein, etc 1527 */ 1528 if (cic->seek_samples <= 60) /* second&third seek */ 1529 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024); 1530 else 1531 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64); 1532 1533 cic->seek_samples = (7*cic->seek_samples + 256) / 8; 1534 cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8; 1535 total = cic->seek_total + (cic->seek_samples/2); 1536 do_div(total, cic->seek_samples); 1537 cic->seek_mean = (sector_t)total; 1538} 1539 1540/* 1541 * Disable idle window if the process thinks too long or seeks so much that 1542 * it doesn't matter 1543 */ 1544static void 1545cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1546 struct cfq_io_context *cic) 1547{ 1548 int enable_idle = cfq_cfqq_idle_window(cfqq); 1549 1550 if (!cic->ioc->task || !cfqd->cfq_slice_idle || 1551 (cfqd->hw_tag && CIC_SEEKY(cic))) 1552 enable_idle = 0; 1553 else if (sample_valid(cic->ttime_samples)) { 1554 if (cic->ttime_mean > cfqd->cfq_slice_idle) 1555 enable_idle = 0; 1556 else 1557 enable_idle = 1; 1558 } 1559 1560 if (enable_idle) 1561 cfq_mark_cfqq_idle_window(cfqq); 1562 else 1563 cfq_clear_cfqq_idle_window(cfqq); 1564} 1565 1566 1567/* 1568 * Check if new_cfqq should preempt the currently active queue. Return 0 for 1569 * no or if we aren't sure, a 1 will cause a preempt. 1570 */ 1571static int 1572cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, 1573 struct request *rq) 1574{ 1575 struct cfq_queue *cfqq = cfqd->active_queue; 1576 1577 if (cfq_class_idle(new_cfqq)) 1578 return 0; 1579 1580 if (!cfqq) 1581 return 0; 1582 1583 if (cfq_class_idle(cfqq)) 1584 return 1; 1585 if (!cfq_cfqq_wait_request(new_cfqq)) 1586 return 0; 1587 /* 1588 * if the new request is sync, but the currently running queue is 1589 * not, let the sync request have priority. 1590 */ 1591 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) 1592 return 1; 1593 /* 1594 * So both queues are sync. 
Let the new request get disk time if
 * it's a metadata request and the current queue is doing regular IO.
 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	list_move(&cfqq->cfq_list, &cfqd->cur_rr);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);

	/*
	 * we never wait for an async request and we don't allow preemption
	 * of an async request. so just return early
	 */
	if (!rq_is_sync(rq)) {
		/*
		 * sync process issued an async request, if it's waiting
		 * then expire it and kick rq handling.
		 */
		if (cic == cfqd->active_cic &&
		    del_timer(&cfqd->idle_slice_timer)) {
			cfq_slice_expired(cfqd, 0);
			blk_start_queueing(cfqd->queue);
		}
		return;
	}

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			blk_start_queueing(cfqd->queue);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		blk_start_queueing(cfqd->queue);
	}
}

static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_init_prio_data(cfqq);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}

static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->on_dispatch[sync]);
	cfqd->rq_in_driver--;
	cfqq->on_dispatch[sync]--;
	cfqq->service_last = now;

	if (!cfq_class_idle(cfqq))
cfqd->last_end_request = now; 1718 1719 cfq_resort_rr_list(cfqq, 0); 1720 1721 if (sync) 1722 RQ_CIC(rq)->last_end_request = now; 1723 1724 /* 1725 * If this is the active queue, check if it needs to be expired, 1726 * or if we want to idle in case it has no pending requests. 1727 */ 1728 if (cfqd->active_queue == cfqq) { 1729 if (cfq_cfqq_slice_new(cfqq)) { 1730 cfq_set_prio_slice(cfqd, cfqq); 1731 cfq_clear_cfqq_slice_new(cfqq); 1732 } 1733 if (cfq_slice_used(cfqq)) 1734 cfq_slice_expired(cfqd, 0); 1735 else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) { 1736 if (!cfq_arm_slice_timer(cfqd)) 1737 cfq_schedule_dispatch(cfqd); 1738 } 1739 } 1740} 1741 1742/* 1743 * we temporarily boost lower priority queues if they are holding fs exclusive 1744 * resources. they are boosted to normal prio (CLASS_BE/4) 1745 */ 1746static void cfq_prio_boost(struct cfq_queue *cfqq) 1747{ 1748 const int ioprio_class = cfqq->ioprio_class; 1749 const int ioprio = cfqq->ioprio; 1750 1751 if (has_fs_excl()) { 1752 /* 1753 * boost idle prio on transactions that would lock out other 1754 * users of the filesystem 1755 */ 1756 if (cfq_class_idle(cfqq)) 1757 cfqq->ioprio_class = IOPRIO_CLASS_BE; 1758 if (cfqq->ioprio > IOPRIO_NORM) 1759 cfqq->ioprio = IOPRIO_NORM; 1760 } else { 1761 /* 1762 * check if we need to unboost the queue 1763 */ 1764 if (cfqq->ioprio_class != cfqq->org_ioprio_class) 1765 cfqq->ioprio_class = cfqq->org_ioprio_class; 1766 if (cfqq->ioprio != cfqq->org_ioprio) 1767 cfqq->ioprio = cfqq->org_ioprio; 1768 } 1769 1770 /* 1771 * refile between round-robin lists if we moved the priority class 1772 */ 1773 if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio)) 1774 cfq_resort_rr_list(cfqq, 0); 1775} 1776 1777static inline int __cfq_may_queue(struct cfq_queue *cfqq) 1778{ 1779 if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && 1780 !cfq_cfqq_must_alloc_slice(cfqq)) { 1781 cfq_mark_cfqq_must_alloc_slice(cfqq); 1782 return ELV_MQUEUE_MUST; 1783 } 1784 1785 return ELV_MQUEUE_MAY; 1786} 1787 1788static int cfq_may_queue(request_queue_t *q, int rw) 1789{ 1790 struct cfq_data *cfqd = q->elevator->elevator_data; 1791 struct task_struct *tsk = current; 1792 struct cfq_queue *cfqq; 1793 unsigned int key; 1794 1795 key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC); 1796 1797 /* 1798 * don't force setup of a queue from here, as a call to may_queue 1799 * does not necessarily imply that a request actually will be queued. 1800 * so just lookup a possibly existing queue, or return 'may queue' 1801 * if that fails 1802 */ 1803 cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio); 1804 if (cfqq) { 1805 cfq_init_prio_data(cfqq); 1806 cfq_prio_boost(cfqq); 1807 1808 return __cfq_may_queue(cfqq); 1809 } 1810 1811 return ELV_MQUEUE_MAY; 1812} 1813 1814/* 1815 * queue lock held here 1816 */ 1817static void cfq_put_request(struct request *rq) 1818{ 1819 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1820 1821 if (cfqq) { 1822 const int rw = rq_data_dir(rq); 1823 1824 BUG_ON(!cfqq->allocated[rw]); 1825 cfqq->allocated[rw]--; 1826 1827 put_io_context(RQ_CIC(rq)->ioc); 1828 1829 rq->elevator_private = NULL; 1830 rq->elevator_private2 = NULL; 1831 1832 cfq_put_queue(cfqq); 1833 } 1834} 1835 1836/* 1837 * Allocate cfq data structures associated with this request. 
1838 */ 1839static int 1840cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask) 1841{ 1842 struct cfq_data *cfqd = q->elevator->elevator_data; 1843 struct task_struct *tsk = current; 1844 struct cfq_io_context *cic; 1845 const int rw = rq_data_dir(rq); 1846 const int is_sync = rq_is_sync(rq); 1847 pid_t key = cfq_queue_pid(tsk, rw, is_sync); 1848 struct cfq_queue *cfqq; 1849 unsigned long flags; 1850 1851 might_sleep_if(gfp_mask & __GFP_WAIT); 1852 1853 cic = cfq_get_io_context(cfqd, gfp_mask); 1854 1855 spin_lock_irqsave(q->queue_lock, flags); 1856 1857 if (!cic) 1858 goto queue_fail; 1859 1860 if (!cic->cfqq[is_sync]) { 1861 cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask); 1862 if (!cfqq) 1863 goto queue_fail; 1864 1865 cic->cfqq[is_sync] = cfqq; 1866 } else 1867 cfqq = cic->cfqq[is_sync]; 1868 1869 cfqq->allocated[rw]++; 1870 cfq_clear_cfqq_must_alloc(cfqq); 1871 atomic_inc(&cfqq->ref); 1872 1873 spin_unlock_irqrestore(q->queue_lock, flags); 1874 1875 rq->elevator_private = cic; 1876 rq->elevator_private2 = cfqq; 1877 return 0; 1878 1879queue_fail: 1880 if (cic) 1881 put_io_context(cic->ioc); 1882 1883 cfq_schedule_dispatch(cfqd); 1884 spin_unlock_irqrestore(q->queue_lock, flags); 1885 return 1; 1886} 1887 1888static void cfq_kick_queue(struct work_struct *work) 1889{ 1890 struct cfq_data *cfqd = 1891 container_of(work, struct cfq_data, unplug_work); 1892 request_queue_t *q = cfqd->queue; 1893 unsigned long flags; 1894 1895 spin_lock_irqsave(q->queue_lock, flags); 1896 blk_start_queueing(q); 1897 spin_unlock_irqrestore(q->queue_lock, flags); 1898} 1899 1900/* 1901 * Timer running if the active_queue is currently idling inside its time slice 1902 */ 1903static void cfq_idle_slice_timer(unsigned long data) 1904{ 1905 struct cfq_data *cfqd = (struct cfq_data *) data; 1906 struct cfq_queue *cfqq; 1907 unsigned long flags; 1908 1909 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1910 1911 if ((cfqq = cfqd->active_queue) != NULL) { 1912 /* 1913 * expired 1914 */ 1915 if (cfq_slice_used(cfqq)) 1916 goto expire; 1917 1918 /* 1919 * only expire and reinvoke request handler, if there are 1920 * other queues with pending requests 1921 */ 1922 if (!cfqd->busy_queues) 1923 goto out_cont; 1924 1925 /* 1926 * not expired and it has a request pending, let it dispatch 1927 */ 1928 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) { 1929 cfq_mark_cfqq_must_dispatch(cfqq); 1930 goto out_kick; 1931 } 1932 } 1933expire: 1934 cfq_slice_expired(cfqd, 0); 1935out_kick: 1936 cfq_schedule_dispatch(cfqd); 1937out_cont: 1938 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 1939} 1940 1941/* 1942 * Timer running if an idle class queue is waiting for service 1943 */ 1944static void cfq_idle_class_timer(unsigned long data) 1945{ 1946 struct cfq_data *cfqd = (struct cfq_data *) data; 1947 unsigned long flags, end; 1948 1949 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1950 1951 /* 1952 * race with a non-idle queue, reset timer 1953 */ 1954 end = cfqd->last_end_request + CFQ_IDLE_GRACE; 1955 if (!time_after_eq(jiffies, end)) 1956 mod_timer(&cfqd->idle_class_timer, end); 1957 else 1958 cfq_schedule_dispatch(cfqd); 1959 1960 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 1961} 1962 1963static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 1964{ 1965 del_timer_sync(&cfqd->idle_slice_timer); 1966 del_timer_sync(&cfqd->idle_class_timer); 1967 blk_sync_queue(cfqd->queue); 1968} 1969 1970static void cfq_exit_queue(elevator_t *e) 1971{ 1972 struct cfq_data *cfqd = e->elevator_data; 1973 
request_queue_t *q = cfqd->queue; 1974 1975 cfq_shutdown_timer_wq(cfqd); 1976 1977 spin_lock_irq(q->queue_lock); 1978 1979 if (cfqd->active_queue) 1980 __cfq_slice_expired(cfqd, cfqd->active_queue, 0); 1981 1982 while (!list_empty(&cfqd->cic_list)) { 1983 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, 1984 struct cfq_io_context, 1985 queue_list); 1986 1987 __cfq_exit_single_io_context(cfqd, cic); 1988 } 1989 1990 spin_unlock_irq(q->queue_lock); 1991 1992 cfq_shutdown_timer_wq(cfqd); 1993 1994 kfree(cfqd->cfq_hash); 1995 kfree(cfqd); 1996} 1997 1998static void *cfq_init_queue(request_queue_t *q) 1999{ 2000 struct cfq_data *cfqd; 2001 int i; 2002 2003 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); 2004 if (!cfqd) 2005 return NULL; 2006 2007 memset(cfqd, 0, sizeof(*cfqd)); 2008 2009 for (i = 0; i < CFQ_PRIO_LISTS; i++) 2010 INIT_LIST_HEAD(&cfqd->rr_list[i]); 2011 2012 INIT_LIST_HEAD(&cfqd->busy_rr); 2013 INIT_LIST_HEAD(&cfqd->cur_rr); 2014 INIT_LIST_HEAD(&cfqd->idle_rr); 2015 INIT_LIST_HEAD(&cfqd->cic_list); 2016 2017 cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node); 2018 if (!cfqd->cfq_hash) 2019 goto out_free; 2020 2021 for (i = 0; i < CFQ_QHASH_ENTRIES; i++) 2022 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]); 2023 2024 cfqd->queue = q; 2025 2026 init_timer(&cfqd->idle_slice_timer); 2027 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 2028 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 2029 2030 init_timer(&cfqd->idle_class_timer); 2031 cfqd->idle_class_timer.function = cfq_idle_class_timer; 2032 cfqd->idle_class_timer.data = (unsigned long) cfqd; 2033 2034 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 2035 2036 cfqd->cfq_quantum = cfq_quantum; 2037 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; 2038 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; 2039 cfqd->cfq_back_max = cfq_back_max; 2040 cfqd->cfq_back_penalty = cfq_back_penalty; 2041 cfqd->cfq_slice[0] = cfq_slice_async; 2042 cfqd->cfq_slice[1] = cfq_slice_sync; 2043 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 2044 cfqd->cfq_slice_idle = cfq_slice_idle; 2045 2046 return cfqd; 2047out_free: 2048 kfree(cfqd); 2049 return NULL; 2050} 2051 2052static void cfq_slab_kill(void) 2053{ 2054 if (cfq_pool) 2055 kmem_cache_destroy(cfq_pool); 2056 if (cfq_ioc_pool) 2057 kmem_cache_destroy(cfq_ioc_pool); 2058} 2059 2060static int __init cfq_slab_setup(void) 2061{ 2062 cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0, 2063 NULL, NULL); 2064 if (!cfq_pool) 2065 goto fail; 2066 2067 cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool", 2068 sizeof(struct cfq_io_context), 0, 0, NULL, NULL); 2069 if (!cfq_ioc_pool) 2070 goto fail; 2071 2072 return 0; 2073fail: 2074 cfq_slab_kill(); 2075 return -ENOMEM; 2076} 2077 2078/* 2079 * sysfs parts below --> 2080 */ 2081 2082static ssize_t 2083cfq_var_show(unsigned int var, char *page) 2084{ 2085 return sprintf(page, "%d\n", var); 2086} 2087 2088static ssize_t 2089cfq_var_store(unsigned int *var, const char *page, size_t count) 2090{ 2091 char *p = (char *) page; 2092 2093 *var = simple_strtoul(p, &p, 10); 2094 return count; 2095} 2096 2097#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ 2098static ssize_t __FUNC(elevator_t *e, char *page) \ 2099{ \ 2100 struct cfq_data *cfqd = e->elevator_data; \ 2101 unsigned int __data = __VAR; \ 2102 if (__CONV) \ 2103 __data = jiffies_to_msecs(__data); \ 2104 return cfq_var_show(__data, (page)); \ 2105} 2106SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); 
2107SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); 2108SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); 2109SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); 2110SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); 2111SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); 2112SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 2113SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 2114SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 2115#undef SHOW_FUNCTION 2116 2117#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 2118static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ 2119{ \ 2120 struct cfq_data *cfqd = e->elevator_data; \ 2121 unsigned int __data; \ 2122 int ret = cfq_var_store(&__data, (page), count); \ 2123 if (__data < (MIN)) \ 2124 __data = (MIN); \ 2125 else if (__data > (MAX)) \ 2126 __data = (MAX); \ 2127 if (__CONV) \ 2128 *(__PTR) = msecs_to_jiffies(__data); \ 2129 else \ 2130 *(__PTR) = __data; \ 2131 return ret; \ 2132} 2133STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 2134STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); 2135STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); 2136STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 2137STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); 2138STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 2139STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 2140STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2141STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); 2142#undef STORE_FUNCTION 2143 2144#define CFQ_ATTR(name) \ 2145 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) 2146 2147static struct elv_fs_entry cfq_attrs[] = { 2148 CFQ_ATTR(quantum), 2149 CFQ_ATTR(fifo_expire_sync), 2150 CFQ_ATTR(fifo_expire_async), 2151 CFQ_ATTR(back_seek_max), 2152 CFQ_ATTR(back_seek_penalty), 2153 CFQ_ATTR(slice_sync), 2154 CFQ_ATTR(slice_async), 2155 CFQ_ATTR(slice_async_rq), 2156 CFQ_ATTR(slice_idle), 2157 __ATTR_NULL 2158}; 2159 2160static struct elevator_type iosched_cfq = { 2161 .ops = { 2162 .elevator_merge_fn = cfq_merge, 2163 .elevator_merged_fn = cfq_merged_request, 2164 .elevator_merge_req_fn = cfq_merged_requests, 2165 .elevator_allow_merge_fn = cfq_allow_merge, 2166 .elevator_dispatch_fn = cfq_dispatch_requests, 2167 .elevator_add_req_fn = cfq_insert_request, 2168 .elevator_activate_req_fn = cfq_activate_request, 2169 .elevator_deactivate_req_fn = cfq_deactivate_request, 2170 .elevator_queue_empty_fn = cfq_queue_empty, 2171 .elevator_completed_req_fn = cfq_completed_request, 2172 .elevator_former_req_fn = elv_rb_former_request, 2173 .elevator_latter_req_fn = elv_rb_latter_request, 2174 .elevator_set_req_fn = cfq_set_request, 2175 .elevator_put_req_fn = cfq_put_request, 2176 .elevator_may_queue_fn = cfq_may_queue, 2177 .elevator_init_fn = cfq_init_queue, 2178 .elevator_exit_fn = cfq_exit_queue, 2179 .trim = cfq_free_io_context, 2180 }, 2181 .elevator_attrs = cfq_attrs, 2182 .elevator_name = "cfq", 2183 .elevator_owner = THIS_MODULE, 2184}; 2185 2186static int __init cfq_init(void) 2187{ 2188 int ret; 2189 2190 /* 2191 * could be 0 on HZ < 1000 setups 2192 */ 2193 if (!cfq_slice_async) 2194 cfq_slice_async = 1; 2195 
if (!cfq_slice_idle) 2196 cfq_slice_idle = 1; 2197 2198 if (cfq_slab_setup()) 2199 return -ENOMEM; 2200 2201 ret = elv_register(&iosched_cfq); 2202 if (ret) 2203 cfq_slab_kill(); 2204 2205 return ret; 2206} 2207 2208static void __exit cfq_exit(void) 2209{ 2210 DECLARE_COMPLETION_ONSTACK(all_gone); 2211 elv_unregister(&iosched_cfq); 2212 ioc_gone = &all_gone; 2213 /* ioc_gone's update must be visible before reading ioc_count */ 2214 smp_wmb(); 2215 if (elv_ioc_count_read(ioc_count)) 2216 wait_for_completion(ioc_gone); 2217 synchronize_rcu(); 2218 cfq_slab_kill(); 2219} 2220 2221module_init(cfq_init); 2222module_exit(cfq_exit); 2223 2224MODULE_AUTHOR("Jens Axboe"); 2225MODULE_LICENSE("GPL"); 2226MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler"); 2227