cfq-iosched.c revision ff6657c6c8ac99444e5dd4c4f7c1dc9271173382
/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;
	/*
	 * Used to track any pending rt requests so we can pre-empt current
	 * non-RT cfqq in service when this value is non-zero.
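	 * cfq_add_cfqq_rr() bumps this counter whenever an RT-class queue
	 * becomes busy, and cfq_select_queue() checks it so that a non-RT
	 * active queue is expired early and the RT queue gets serviced next.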
	 */
	unsigned int busy_rt_queues;

	int rq_in_driver;
	int sync_flight;

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	int hw_tag_samples;
	int rq_in_driver_peak;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;
	unsigned int slice_dispatch;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)
\ 214 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) 215#define cfq_log(cfqd, fmt, args...) \ 216 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) 217 218static void cfq_dispatch_insert(struct request_queue *, struct request *); 219static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, 220 struct io_context *, gfp_t); 221static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, 222 struct io_context *); 223 224static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, 225 int is_sync) 226{ 227 return cic->cfqq[!!is_sync]; 228} 229 230static inline void cic_set_cfqq(struct cfq_io_context *cic, 231 struct cfq_queue *cfqq, int is_sync) 232{ 233 cic->cfqq[!!is_sync] = cfqq; 234} 235 236/* 237 * We regard a request as SYNC, if it's either a read or has the SYNC bit 238 * set (in which case it could also be direct WRITE). 239 */ 240static inline int cfq_bio_sync(struct bio *bio) 241{ 242 if (bio_data_dir(bio) == READ || bio_sync(bio)) 243 return 1; 244 245 return 0; 246} 247 248/* 249 * scheduler run of queue, if there are requests pending and no one in the 250 * driver that will restart queueing 251 */ 252static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) 253{ 254 if (cfqd->busy_queues) { 255 cfq_log(cfqd, "schedule dispatch"); 256 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); 257 } 258} 259 260static int cfq_queue_empty(struct request_queue *q) 261{ 262 struct cfq_data *cfqd = q->elevator->elevator_data; 263 264 return !cfqd->busy_queues; 265} 266 267/* 268 * Scale schedule slice based on io priority. Use the sync time slice only 269 * if a queue is marked sync and has sync io queued. A sync queue with async 270 * io only, should not get full sync slice length. 271 */ 272static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync, 273 unsigned short prio) 274{ 275 const int base_slice = cfqd->cfq_slice[sync]; 276 277 WARN_ON(prio >= IOPRIO_BE_NR); 278 279 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio)); 280} 281 282static inline int 283cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 284{ 285 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); 286} 287 288static inline void 289cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) 290{ 291 cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies; 292 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); 293} 294 295/* 296 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end 297 * isn't valid until the first request from the dispatch is activated 298 * and the slice time set. 299 */ 300static inline int cfq_slice_used(struct cfq_queue *cfqq) 301{ 302 if (cfq_cfqq_slice_new(cfqq)) 303 return 0; 304 if (time_before(jiffies, cfqq->slice_end)) 305 return 0; 306 307 return 1; 308} 309 310/* 311 * Lifted from AS - choose which of rq1 and rq2 that is best served now. 312 * We choose the request that is closest to the head right now. Distance 313 * behind the head is penalized and only allowed to a certain extent. 314 */ 315static struct request * 316cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2) 317{ 318 sector_t last, s1, s2, d1 = 0, d2 = 0; 319 unsigned long back_max; 320#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ 321#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ 322 unsigned wrap = 0; /* bit mask: requests behind the disk head? 
*/ 323 324 if (rq1 == NULL || rq1 == rq2) 325 return rq2; 326 if (rq2 == NULL) 327 return rq1; 328 329 if (rq_is_sync(rq1) && !rq_is_sync(rq2)) 330 return rq1; 331 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 332 return rq2; 333 if (rq_is_meta(rq1) && !rq_is_meta(rq2)) 334 return rq1; 335 else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 336 return rq2; 337 338 s1 = rq1->sector; 339 s2 = rq2->sector; 340 341 last = cfqd->last_position; 342 343 /* 344 * by definition, 1KiB is 2 sectors 345 */ 346 back_max = cfqd->cfq_back_max * 2; 347 348 /* 349 * Strict one way elevator _except_ in the case where we allow 350 * short backward seeks which are biased as twice the cost of a 351 * similar forward seek. 352 */ 353 if (s1 >= last) 354 d1 = s1 - last; 355 else if (s1 + back_max >= last) 356 d1 = (last - s1) * cfqd->cfq_back_penalty; 357 else 358 wrap |= CFQ_RQ1_WRAP; 359 360 if (s2 >= last) 361 d2 = s2 - last; 362 else if (s2 + back_max >= last) 363 d2 = (last - s2) * cfqd->cfq_back_penalty; 364 else 365 wrap |= CFQ_RQ2_WRAP; 366 367 /* Found required data */ 368 369 /* 370 * By doing switch() on the bit mask "wrap" we avoid having to 371 * check two variables for all permutations: --> faster! 372 */ 373 switch (wrap) { 374 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ 375 if (d1 < d2) 376 return rq1; 377 else if (d2 < d1) 378 return rq2; 379 else { 380 if (s1 >= s2) 381 return rq1; 382 else 383 return rq2; 384 } 385 386 case CFQ_RQ2_WRAP: 387 return rq1; 388 case CFQ_RQ1_WRAP: 389 return rq2; 390 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */ 391 default: 392 /* 393 * Since both rqs are wrapped, 394 * start with the one that's further behind head 395 * (--> only *one* back seek required), 396 * since back seek takes more time than forward. 397 */ 398 if (s1 <= s2) 399 return rq1; 400 else 401 return rq2; 402 } 403} 404 405/* 406 * The below is leftmost cache rbtree addon 407 */ 408static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) 409{ 410 if (!root->left) 411 root->left = rb_first(&root->rb); 412 413 if (root->left) 414 return rb_entry(root->left, struct cfq_queue, rb_node); 415 416 return NULL; 417} 418 419static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) 420{ 421 if (root->left == n) 422 root->left = NULL; 423 424 rb_erase(n, &root->rb); 425 RB_CLEAR_NODE(n); 426} 427 428/* 429 * would be nice to take fifo expire time into account as well 430 */ 431static struct request * 432cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 433 struct request *last) 434{ 435 struct rb_node *rbnext = rb_next(&last->rb_node); 436 struct rb_node *rbprev = rb_prev(&last->rb_node); 437 struct request *next = NULL, *prev = NULL; 438 439 BUG_ON(RB_EMPTY_NODE(&last->rb_node)); 440 441 if (rbprev) 442 prev = rb_entry_rq(rbprev); 443 444 if (rbnext) 445 next = rb_entry_rq(rbnext); 446 else { 447 rbnext = rb_first(&cfqq->sort_list); 448 if (rbnext && rbnext != &last->rb_node) 449 next = rb_entry_rq(rbnext); 450 } 451 452 return cfq_choose_req(cfqd, next, prev); 453} 454 455static unsigned long cfq_slice_offset(struct cfq_data *cfqd, 456 struct cfq_queue *cfqq) 457{ 458 /* 459 * just an approximation, should be ok. 460 */ 461 return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) - 462 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); 463} 464 465/* 466 * The cfqd->service_tree holds all pending cfq_queue's that have 467 * requests waiting to be processed. It is sorted in the order that 468 * we will service the queues. 
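 *
 * Roughly, the key answers "when should this queue be serviced next":
 * RT queues sort before BE queues and idle-class queues sort last;
 * within a class the tree is ordered by rb_key, which for a non-idle
 * queue is jiffies + cfq_slice_offset() plus any leftover slice_resid.
 * For example, with the default 100ms sync slice cfq_prio_slice() is
 * 180ms for prio 0 but only 40ms for prio 7, so lower-priority queues
 * get a larger offset, land further right in the tree, and are picked
 * later.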
469 */ 470static void cfq_service_tree_add(struct cfq_data *cfqd, 471 struct cfq_queue *cfqq, int add_front) 472{ 473 struct rb_node **p, *parent; 474 struct cfq_queue *__cfqq; 475 unsigned long rb_key; 476 int left; 477 478 if (cfq_class_idle(cfqq)) { 479 rb_key = CFQ_IDLE_DELAY; 480 parent = rb_last(&cfqd->service_tree.rb); 481 if (parent && parent != &cfqq->rb_node) { 482 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 483 rb_key += __cfqq->rb_key; 484 } else 485 rb_key += jiffies; 486 } else if (!add_front) { 487 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; 488 rb_key += cfqq->slice_resid; 489 cfqq->slice_resid = 0; 490 } else 491 rb_key = 0; 492 493 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 494 /* 495 * same position, nothing more to do 496 */ 497 if (rb_key == cfqq->rb_key) 498 return; 499 500 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 501 } 502 503 left = 1; 504 parent = NULL; 505 p = &cfqd->service_tree.rb.rb_node; 506 while (*p) { 507 struct rb_node **n; 508 509 parent = *p; 510 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 511 512 /* 513 * sort RT queues first, we always want to give 514 * preference to them. IDLE queues goes to the back. 515 * after that, sort on the next service time. 516 */ 517 if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq)) 518 n = &(*p)->rb_left; 519 else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq)) 520 n = &(*p)->rb_right; 521 else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq)) 522 n = &(*p)->rb_left; 523 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq)) 524 n = &(*p)->rb_right; 525 else if (rb_key < __cfqq->rb_key) 526 n = &(*p)->rb_left; 527 else 528 n = &(*p)->rb_right; 529 530 if (n == &(*p)->rb_right) 531 left = 0; 532 533 p = n; 534 } 535 536 if (left) 537 cfqd->service_tree.left = &cfqq->rb_node; 538 539 cfqq->rb_key = rb_key; 540 rb_link_node(&cfqq->rb_node, parent, p); 541 rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb); 542} 543 544/* 545 * Update cfqq's position in the service tree. 546 */ 547static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) 548{ 549 /* 550 * Resorting requires the cfqq to be on the RR list already. 551 */ 552 if (cfq_cfqq_on_rr(cfqq)) 553 cfq_service_tree_add(cfqd, cfqq, 0); 554} 555 556/* 557 * add to busy list of queues for service, trying to be fair in ordering 558 * the pending list according to last request service 559 */ 560static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 561{ 562 cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); 563 BUG_ON(cfq_cfqq_on_rr(cfqq)); 564 cfq_mark_cfqq_on_rr(cfqq); 565 cfqd->busy_queues++; 566 if (cfq_class_rt(cfqq)) 567 cfqd->busy_rt_queues++; 568 569 cfq_resort_rr_list(cfqd, cfqq); 570} 571 572/* 573 * Called when the cfqq no longer has requests pending, remove it from 574 * the service tree. 
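 * This is the inverse of cfq_add_cfqq_rr(): the on_rr flag is cleared
 * and busy_queues/busy_rt_queues are decremented so the counters stay
 * in step with the tree contents.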
575 */ 576static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 577{ 578 cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); 579 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 580 cfq_clear_cfqq_on_rr(cfqq); 581 582 if (!RB_EMPTY_NODE(&cfqq->rb_node)) 583 cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree); 584 585 BUG_ON(!cfqd->busy_queues); 586 cfqd->busy_queues--; 587 if (cfq_class_rt(cfqq)) 588 cfqd->busy_rt_queues--; 589} 590 591/* 592 * rb tree support functions 593 */ 594static void cfq_del_rq_rb(struct request *rq) 595{ 596 struct cfq_queue *cfqq = RQ_CFQQ(rq); 597 struct cfq_data *cfqd = cfqq->cfqd; 598 const int sync = rq_is_sync(rq); 599 600 BUG_ON(!cfqq->queued[sync]); 601 cfqq->queued[sync]--; 602 603 elv_rb_del(&cfqq->sort_list, rq); 604 605 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) 606 cfq_del_cfqq_rr(cfqd, cfqq); 607} 608 609static void cfq_add_rq_rb(struct request *rq) 610{ 611 struct cfq_queue *cfqq = RQ_CFQQ(rq); 612 struct cfq_data *cfqd = cfqq->cfqd; 613 struct request *__alias; 614 615 cfqq->queued[rq_is_sync(rq)]++; 616 617 /* 618 * looks a little odd, but the first insert might return an alias. 619 * if that happens, put the alias on the dispatch list 620 */ 621 while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL) 622 cfq_dispatch_insert(cfqd->queue, __alias); 623 624 if (!cfq_cfqq_on_rr(cfqq)) 625 cfq_add_cfqq_rr(cfqd, cfqq); 626 627 /* 628 * check if this request is a better next-serve candidate 629 */ 630 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq); 631 BUG_ON(!cfqq->next_rq); 632} 633 634static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) 635{ 636 elv_rb_del(&cfqq->sort_list, rq); 637 cfqq->queued[rq_is_sync(rq)]--; 638 cfq_add_rq_rb(rq); 639} 640 641static struct request * 642cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) 643{ 644 struct task_struct *tsk = current; 645 struct cfq_io_context *cic; 646 struct cfq_queue *cfqq; 647 648 cic = cfq_cic_lookup(cfqd, tsk->io_context); 649 if (!cic) 650 return NULL; 651 652 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 653 if (cfqq) { 654 sector_t sector = bio->bi_sector + bio_sectors(bio); 655 656 return elv_rb_find(&cfqq->sort_list, sector); 657 } 658 659 return NULL; 660} 661 662static void cfq_activate_request(struct request_queue *q, struct request *rq) 663{ 664 struct cfq_data *cfqd = q->elevator->elevator_data; 665 666 cfqd->rq_in_driver++; 667 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 668 cfqd->rq_in_driver); 669 670 cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; 671} 672 673static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 674{ 675 struct cfq_data *cfqd = q->elevator->elevator_data; 676 677 WARN_ON(!cfqd->rq_in_driver); 678 cfqd->rq_in_driver--; 679 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", 680 cfqd->rq_in_driver); 681} 682 683static void cfq_remove_request(struct request *rq) 684{ 685 struct cfq_queue *cfqq = RQ_CFQQ(rq); 686 687 if (cfqq->next_rq == rq) 688 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); 689 690 list_del_init(&rq->queuelist); 691 cfq_del_rq_rb(rq); 692 693 cfqq->cfqd->rq_queued--; 694 if (rq_is_meta(rq)) { 695 WARN_ON(!cfqq->meta_pending); 696 cfqq->meta_pending--; 697 } 698} 699 700static int cfq_merge(struct request_queue *q, struct request **req, 701 struct bio *bio) 702{ 703 struct cfq_data *cfqd = q->elevator->elevator_data; 704 struct request *__rq; 705 706 __rq = cfq_find_rq_fmerge(cfqd, bio); 707 if (__rq && elv_rq_merge_ok(__rq, bio)) { 708 
*req = __rq; 709 return ELEVATOR_FRONT_MERGE; 710 } 711 712 return ELEVATOR_NO_MERGE; 713} 714 715static void cfq_merged_request(struct request_queue *q, struct request *req, 716 int type) 717{ 718 if (type == ELEVATOR_FRONT_MERGE) { 719 struct cfq_queue *cfqq = RQ_CFQQ(req); 720 721 cfq_reposition_rq_rb(cfqq, req); 722 } 723} 724 725static void 726cfq_merged_requests(struct request_queue *q, struct request *rq, 727 struct request *next) 728{ 729 /* 730 * reposition in fifo if next is older than rq 731 */ 732 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 733 time_before(next->start_time, rq->start_time)) 734 list_move(&rq->queuelist, &next->queuelist); 735 736 cfq_remove_request(next); 737} 738 739static int cfq_allow_merge(struct request_queue *q, struct request *rq, 740 struct bio *bio) 741{ 742 struct cfq_data *cfqd = q->elevator->elevator_data; 743 struct cfq_io_context *cic; 744 struct cfq_queue *cfqq; 745 746 /* 747 * Disallow merge of a sync bio into an async request. 748 */ 749 if (cfq_bio_sync(bio) && !rq_is_sync(rq)) 750 return 0; 751 752 /* 753 * Lookup the cfqq that this bio will be queued with. Allow 754 * merge only if rq is queued there. 755 */ 756 cic = cfq_cic_lookup(cfqd, current->io_context); 757 if (!cic) 758 return 0; 759 760 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 761 if (cfqq == RQ_CFQQ(rq)) 762 return 1; 763 764 return 0; 765} 766 767static void __cfq_set_active_queue(struct cfq_data *cfqd, 768 struct cfq_queue *cfqq) 769{ 770 if (cfqq) { 771 cfq_log_cfqq(cfqd, cfqq, "set_active"); 772 cfqq->slice_end = 0; 773 cfqq->slice_dispatch = 0; 774 775 cfq_clear_cfqq_wait_request(cfqq); 776 cfq_clear_cfqq_must_dispatch(cfqq); 777 cfq_clear_cfqq_must_alloc_slice(cfqq); 778 cfq_clear_cfqq_fifo_expire(cfqq); 779 cfq_mark_cfqq_slice_new(cfqq); 780 781 del_timer(&cfqd->idle_slice_timer); 782 } 783 784 cfqd->active_queue = cfqq; 785} 786 787/* 788 * current cfqq expired its slice (or was too idle), select new one 789 */ 790static void 791__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, 792 int timed_out) 793{ 794 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); 795 796 if (cfq_cfqq_wait_request(cfqq)) 797 del_timer(&cfqd->idle_slice_timer); 798 799 cfq_clear_cfqq_wait_request(cfqq); 800 801 /* 802 * store what was left of this slice, if the queue idled/timed out 803 */ 804 if (timed_out && !cfq_cfqq_slice_new(cfqq)) { 805 cfqq->slice_resid = cfqq->slice_end - jiffies; 806 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); 807 } 808 809 cfq_resort_rr_list(cfqd, cfqq); 810 811 if (cfqq == cfqd->active_queue) 812 cfqd->active_queue = NULL; 813 814 if (cfqd->active_cic) { 815 put_io_context(cfqd->active_cic->ioc); 816 cfqd->active_cic = NULL; 817 } 818} 819 820static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out) 821{ 822 struct cfq_queue *cfqq = cfqd->active_queue; 823 824 if (cfqq) 825 __cfq_slice_expired(cfqd, cfqq, timed_out); 826} 827 828/* 829 * Get next queue for service. Unless we have a queue preemption, 830 * we'll simply select the first cfqq in the service tree. 831 */ 832static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) 833{ 834 if (RB_EMPTY_ROOT(&cfqd->service_tree.rb)) 835 return NULL; 836 837 return cfq_rb_first(&cfqd->service_tree); 838} 839 840/* 841 * Get and set a new active queue for service. 
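 * That is, the leftmost queue on the service tree (as returned by
 * cfq_get_next_queue()) becomes cfqd->active_queue.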
842 */ 843static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd) 844{ 845 struct cfq_queue *cfqq; 846 847 cfqq = cfq_get_next_queue(cfqd); 848 __cfq_set_active_queue(cfqd, cfqq); 849 return cfqq; 850} 851 852static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, 853 struct request *rq) 854{ 855 if (rq->sector >= cfqd->last_position) 856 return rq->sector - cfqd->last_position; 857 else 858 return cfqd->last_position - rq->sector; 859} 860 861static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq) 862{ 863 struct cfq_io_context *cic = cfqd->active_cic; 864 865 if (!sample_valid(cic->seek_samples)) 866 return 0; 867 868 return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean; 869} 870 871static int cfq_close_cooperator(struct cfq_data *cfq_data, 872 struct cfq_queue *cfqq) 873{ 874 /* 875 * We should notice if some of the queues are cooperating, eg 876 * working closely on the same area of the disk. In that case, 877 * we can group them together and don't waste time idling. 878 */ 879 return 0; 880} 881 882#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024)) 883 884static void cfq_arm_slice_timer(struct cfq_data *cfqd) 885{ 886 struct cfq_queue *cfqq = cfqd->active_queue; 887 struct cfq_io_context *cic; 888 unsigned long sl; 889 890 /* 891 * SSD device without seek penalty, disable idling. But only do so 892 * for devices that support queuing, otherwise we still have a problem 893 * with sync vs async workloads. 894 */ 895 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) 896 return; 897 898 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); 899 WARN_ON(cfq_cfqq_slice_new(cfqq)); 900 901 /* 902 * idle is disabled, either manually or by past process history 903 */ 904 if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq)) 905 return; 906 907 /* 908 * still requests with the driver, don't idle 909 */ 910 if (cfqd->rq_in_driver) 911 return; 912 913 /* 914 * task has exited, don't wait 915 */ 916 cic = cfqd->active_cic; 917 if (!cic || !atomic_read(&cic->ioc->nr_tasks)) 918 return; 919 920 /* 921 * See if this prio level has a good candidate 922 */ 923 if (cfq_close_cooperator(cfqd, cfqq) && 924 (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2)) 925 return; 926 927 cfq_mark_cfqq_wait_request(cfqq); 928 929 /* 930 * we don't want to idle for seeks, but we do want to allow 931 * fair distribution of slice time for a process doing back-to-back 932 * seeks. so allow a little bit of time for him to submit a new rq 933 */ 934 sl = cfqd->cfq_slice_idle; 935 if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) 936 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT)); 937 938 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 939 cfq_log(cfqd, "arm_idle: %lu", sl); 940} 941 942/* 943 * Move request from internal lists to the request queue dispatch list. 
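 * The request leaves the cfqq sort_list and fifo, cfqq->dispatched is
 * bumped, and sync requests are also counted in cfqd->sync_flight so
 * that async dispatch can be held back while sync IO is in flight.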
944 */ 945static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) 946{ 947 struct cfq_data *cfqd = q->elevator->elevator_data; 948 struct cfq_queue *cfqq = RQ_CFQQ(rq); 949 950 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); 951 952 cfq_remove_request(rq); 953 cfqq->dispatched++; 954 elv_dispatch_sort(q, rq); 955 956 if (cfq_cfqq_sync(cfqq)) 957 cfqd->sync_flight++; 958} 959 960/* 961 * return expired entry, or NULL to just start from scratch in rbtree 962 */ 963static struct request *cfq_check_fifo(struct cfq_queue *cfqq) 964{ 965 struct cfq_data *cfqd = cfqq->cfqd; 966 struct request *rq; 967 int fifo; 968 969 if (cfq_cfqq_fifo_expire(cfqq)) 970 return NULL; 971 972 cfq_mark_cfqq_fifo_expire(cfqq); 973 974 if (list_empty(&cfqq->fifo)) 975 return NULL; 976 977 fifo = cfq_cfqq_sync(cfqq); 978 rq = rq_entry_fifo(cfqq->fifo.next); 979 980 if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) 981 rq = NULL; 982 983 cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq); 984 return rq; 985} 986 987static inline int 988cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 989{ 990 const int base_rq = cfqd->cfq_slice_async_rq; 991 992 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 993 994 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); 995} 996 997/* 998 * Select a queue for service. If we have a current active queue, 999 * check whether to continue servicing it, or retrieve and set a new one. 1000 */ 1001static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) 1002{ 1003 struct cfq_queue *cfqq; 1004 1005 cfqq = cfqd->active_queue; 1006 if (!cfqq) 1007 goto new_queue; 1008 1009 /* 1010 * The active queue has run out of time, expire it and select new. 1011 */ 1012 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) 1013 goto expire; 1014 1015 /* 1016 * If we have a RT cfqq waiting, then we pre-empt the current non-rt 1017 * cfqq. 1018 */ 1019 if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) { 1020 /* 1021 * We simulate this as cfqq timed out so that it gets to bank 1022 * the remaining of its time slice. 1023 */ 1024 cfq_log_cfqq(cfqd, cfqq, "preempt"); 1025 cfq_slice_expired(cfqd, 1); 1026 goto new_queue; 1027 } 1028 1029 /* 1030 * The active queue has requests and isn't expired, allow it to 1031 * dispatch. 1032 */ 1033 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 1034 goto keep_queue; 1035 1036 /* 1037 * No requests pending. If the active queue still has requests in 1038 * flight or is idling for a new request, allow either of these 1039 * conditions to happen (or time out) before selecting a new queue. 1040 */ 1041 if (timer_pending(&cfqd->idle_slice_timer) || 1042 (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) { 1043 cfqq = NULL; 1044 goto keep_queue; 1045 } 1046 1047expire: 1048 cfq_slice_expired(cfqd, 0); 1049new_queue: 1050 cfqq = cfq_set_active_queue(cfqd); 1051keep_queue: 1052 return cfqq; 1053} 1054 1055static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) 1056{ 1057 int dispatched = 0; 1058 1059 while (cfqq->next_rq) { 1060 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); 1061 dispatched++; 1062 } 1063 1064 BUG_ON(!list_empty(&cfqq->fifo)); 1065 return dispatched; 1066} 1067 1068/* 1069 * Drain our current requests. Used for barriers and when switching 1070 * io schedulers on-the-fly. 
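 * Every queue on the service tree is drained through
 * __cfq_forced_dispatch_cfqq() and the active slice is expired, so
 * busy_queues must be zero by the time this returns.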
1071 */ 1072static int cfq_forced_dispatch(struct cfq_data *cfqd) 1073{ 1074 struct cfq_queue *cfqq; 1075 int dispatched = 0; 1076 1077 while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL) 1078 dispatched += __cfq_forced_dispatch_cfqq(cfqq); 1079 1080 cfq_slice_expired(cfqd, 0); 1081 1082 BUG_ON(cfqd->busy_queues); 1083 1084 cfq_log(cfqd, "forced_dispatch=%d\n", dispatched); 1085 return dispatched; 1086} 1087 1088/* 1089 * Dispatch a request from cfqq, moving them to the request queue 1090 * dispatch list. 1091 */ 1092static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1093{ 1094 struct request *rq; 1095 1096 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); 1097 1098 /* 1099 * follow expired path, else get first next available 1100 */ 1101 rq = cfq_check_fifo(cfqq); 1102 if (!rq) 1103 rq = cfqq->next_rq; 1104 1105 /* 1106 * insert request into driver dispatch list 1107 */ 1108 cfq_dispatch_insert(cfqd->queue, rq); 1109 1110 if (!cfqd->active_cic) { 1111 struct cfq_io_context *cic = RQ_CIC(rq); 1112 1113 atomic_inc(&cic->ioc->refcount); 1114 cfqd->active_cic = cic; 1115 } 1116} 1117 1118/* 1119 * Find the cfqq that we need to service and move a request from that to the 1120 * dispatch list 1121 */ 1122static int cfq_dispatch_requests(struct request_queue *q, int force) 1123{ 1124 struct cfq_data *cfqd = q->elevator->elevator_data; 1125 struct cfq_queue *cfqq; 1126 unsigned int max_dispatch; 1127 1128 if (!cfqd->busy_queues) 1129 return 0; 1130 1131 if (unlikely(force)) 1132 return cfq_forced_dispatch(cfqd); 1133 1134 cfqq = cfq_select_queue(cfqd); 1135 if (!cfqq) 1136 return 0; 1137 1138 /* 1139 * If this is an async queue and we have sync IO in flight, let it wait 1140 */ 1141 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) 1142 return 0; 1143 1144 max_dispatch = cfqd->cfq_quantum; 1145 if (cfq_class_idle(cfqq)) 1146 max_dispatch = 1; 1147 1148 /* 1149 * Does this cfqq already have too much IO in flight? 1150 */ 1151 if (cfqq->dispatched >= max_dispatch) { 1152 /* 1153 * idle queue must always only have a single IO in flight 1154 */ 1155 if (cfq_class_idle(cfqq)) 1156 return 0; 1157 1158 /* 1159 * We have other queues, don't allow more IO from this one 1160 */ 1161 if (cfqd->busy_queues > 1) 1162 return 0; 1163 1164 /* 1165 * we are the only queue, allow up to 4 times of 'quantum' 1166 */ 1167 if (cfqq->dispatched >= 4 * max_dispatch) 1168 return 0; 1169 } 1170 1171 /* 1172 * Dispatch a request from this cfqq 1173 */ 1174 cfq_dispatch_request(cfqd, cfqq); 1175 cfqq->slice_dispatch++; 1176 cfq_clear_cfqq_must_dispatch(cfqq); 1177 1178 /* 1179 * expire an async queue immediately if it has used up its slice. idle 1180 * queue always expire after 1 dispatch round. 1181 */ 1182 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && 1183 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || 1184 cfq_class_idle(cfqq))) { 1185 cfqq->slice_end = jiffies + 1; 1186 cfq_slice_expired(cfqd, 0); 1187 } 1188 1189 cfq_log(cfqd, "dispatched a request"); 1190 return 1; 1191} 1192 1193/* 1194 * task holds one reference to the queue, dropped when task exits. each rq 1195 * in-flight on this queue also holds a reference, dropped when rq is freed. 1196 * 1197 * queue lock must be held here. 
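 * The cached async queues in cfqd->async_cfqq[] hold an extra
 * reference of their own, taken in cfq_get_queue() and dropped from
 * cfq_put_async_queues() at scheduler exit.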
1198 */ 1199static void cfq_put_queue(struct cfq_queue *cfqq) 1200{ 1201 struct cfq_data *cfqd = cfqq->cfqd; 1202 1203 BUG_ON(atomic_read(&cfqq->ref) <= 0); 1204 1205 if (!atomic_dec_and_test(&cfqq->ref)) 1206 return; 1207 1208 cfq_log_cfqq(cfqd, cfqq, "put_queue"); 1209 BUG_ON(rb_first(&cfqq->sort_list)); 1210 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); 1211 BUG_ON(cfq_cfqq_on_rr(cfqq)); 1212 1213 if (unlikely(cfqd->active_queue == cfqq)) { 1214 __cfq_slice_expired(cfqd, cfqq, 0); 1215 cfq_schedule_dispatch(cfqd); 1216 } 1217 1218 kmem_cache_free(cfq_pool, cfqq); 1219} 1220 1221/* 1222 * Must always be called with the rcu_read_lock() held 1223 */ 1224static void 1225__call_for_each_cic(struct io_context *ioc, 1226 void (*func)(struct io_context *, struct cfq_io_context *)) 1227{ 1228 struct cfq_io_context *cic; 1229 struct hlist_node *n; 1230 1231 hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) 1232 func(ioc, cic); 1233} 1234 1235/* 1236 * Call func for each cic attached to this ioc. 1237 */ 1238static void 1239call_for_each_cic(struct io_context *ioc, 1240 void (*func)(struct io_context *, struct cfq_io_context *)) 1241{ 1242 rcu_read_lock(); 1243 __call_for_each_cic(ioc, func); 1244 rcu_read_unlock(); 1245} 1246 1247static void cfq_cic_free_rcu(struct rcu_head *head) 1248{ 1249 struct cfq_io_context *cic; 1250 1251 cic = container_of(head, struct cfq_io_context, rcu_head); 1252 1253 kmem_cache_free(cfq_ioc_pool, cic); 1254 elv_ioc_count_dec(ioc_count); 1255 1256 if (ioc_gone) { 1257 /* 1258 * CFQ scheduler is exiting, grab exit lock and check 1259 * the pending io context count. If it hits zero, 1260 * complete ioc_gone and set it back to NULL 1261 */ 1262 spin_lock(&ioc_gone_lock); 1263 if (ioc_gone && !elv_ioc_count_read(ioc_count)) { 1264 complete(ioc_gone); 1265 ioc_gone = NULL; 1266 } 1267 spin_unlock(&ioc_gone_lock); 1268 } 1269} 1270 1271static void cfq_cic_free(struct cfq_io_context *cic) 1272{ 1273 call_rcu(&cic->rcu_head, cfq_cic_free_rcu); 1274} 1275 1276static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic) 1277{ 1278 unsigned long flags; 1279 1280 BUG_ON(!cic->dead_key); 1281 1282 spin_lock_irqsave(&ioc->lock, flags); 1283 radix_tree_delete(&ioc->radix_root, cic->dead_key); 1284 hlist_del_rcu(&cic->cic_list); 1285 spin_unlock_irqrestore(&ioc->lock, flags); 1286 1287 cfq_cic_free(cic); 1288} 1289 1290/* 1291 * Must be called with rcu_read_lock() held or preemption otherwise disabled. 1292 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(), 1293 * and ->trim() which is called with the task lock held 1294 */ 1295static void cfq_free_io_context(struct io_context *ioc) 1296{ 1297 /* 1298 * ioc->refcount is zero here, or we are called from elv_unregister(), 1299 * so no more cic's are allowed to be linked into this ioc. So it 1300 * should be ok to iterate over the known list, we will see all cic's 1301 * since no new ones are added. 
1302 */ 1303 __call_for_each_cic(ioc, cic_free_func); 1304} 1305 1306static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1307{ 1308 if (unlikely(cfqq == cfqd->active_queue)) { 1309 __cfq_slice_expired(cfqd, cfqq, 0); 1310 cfq_schedule_dispatch(cfqd); 1311 } 1312 1313 cfq_put_queue(cfqq); 1314} 1315 1316static void __cfq_exit_single_io_context(struct cfq_data *cfqd, 1317 struct cfq_io_context *cic) 1318{ 1319 struct io_context *ioc = cic->ioc; 1320 1321 list_del_init(&cic->queue_list); 1322 1323 /* 1324 * Make sure key == NULL is seen for dead queues 1325 */ 1326 smp_wmb(); 1327 cic->dead_key = (unsigned long) cic->key; 1328 cic->key = NULL; 1329 1330 if (ioc->ioc_data == cic) 1331 rcu_assign_pointer(ioc->ioc_data, NULL); 1332 1333 if (cic->cfqq[BLK_RW_ASYNC]) { 1334 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); 1335 cic->cfqq[BLK_RW_ASYNC] = NULL; 1336 } 1337 1338 if (cic->cfqq[BLK_RW_SYNC]) { 1339 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); 1340 cic->cfqq[BLK_RW_SYNC] = NULL; 1341 } 1342} 1343 1344static void cfq_exit_single_io_context(struct io_context *ioc, 1345 struct cfq_io_context *cic) 1346{ 1347 struct cfq_data *cfqd = cic->key; 1348 1349 if (cfqd) { 1350 struct request_queue *q = cfqd->queue; 1351 unsigned long flags; 1352 1353 spin_lock_irqsave(q->queue_lock, flags); 1354 1355 /* 1356 * Ensure we get a fresh copy of the ->key to prevent 1357 * race between exiting task and queue 1358 */ 1359 smp_read_barrier_depends(); 1360 if (cic->key) 1361 __cfq_exit_single_io_context(cfqd, cic); 1362 1363 spin_unlock_irqrestore(q->queue_lock, flags); 1364 } 1365} 1366 1367/* 1368 * The process that ioc belongs to has exited, we need to clean up 1369 * and put the internal structures we have that belongs to that process. 1370 */ 1371static void cfq_exit_io_context(struct io_context *ioc) 1372{ 1373 call_for_each_cic(ioc, cfq_exit_single_io_context); 1374} 1375 1376static struct cfq_io_context * 1377cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) 1378{ 1379 struct cfq_io_context *cic; 1380 1381 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO, 1382 cfqd->queue->node); 1383 if (cic) { 1384 cic->last_end_request = jiffies; 1385 INIT_LIST_HEAD(&cic->queue_list); 1386 INIT_HLIST_NODE(&cic->cic_list); 1387 cic->dtor = cfq_free_io_context; 1388 cic->exit = cfq_exit_io_context; 1389 elv_ioc_count_inc(ioc_count); 1390 } 1391 1392 return cic; 1393} 1394 1395static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) 1396{ 1397 struct task_struct *tsk = current; 1398 int ioprio_class; 1399 1400 if (!cfq_cfqq_prio_changed(cfqq)) 1401 return; 1402 1403 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); 1404 switch (ioprio_class) { 1405 default: 1406 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 1407 case IOPRIO_CLASS_NONE: 1408 /* 1409 * no prio set, inherit CPU scheduling settings 1410 */ 1411 cfqq->ioprio = task_nice_ioprio(tsk); 1412 cfqq->ioprio_class = task_nice_ioclass(tsk); 1413 break; 1414 case IOPRIO_CLASS_RT: 1415 cfqq->ioprio = task_ioprio(ioc); 1416 cfqq->ioprio_class = IOPRIO_CLASS_RT; 1417 break; 1418 case IOPRIO_CLASS_BE: 1419 cfqq->ioprio = task_ioprio(ioc); 1420 cfqq->ioprio_class = IOPRIO_CLASS_BE; 1421 break; 1422 case IOPRIO_CLASS_IDLE: 1423 cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 1424 cfqq->ioprio = 7; 1425 cfq_clear_cfqq_idle_window(cfqq); 1426 break; 1427 } 1428 1429 /* 1430 * keep track of original prio settings in case we have to temporarily 1431 * elevate the priority of this queue 1432 */ 1433 cfqq->org_ioprio 
= cfqq->ioprio; 1434 cfqq->org_ioprio_class = cfqq->ioprio_class; 1435 cfq_clear_cfqq_prio_changed(cfqq); 1436} 1437 1438static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) 1439{ 1440 struct cfq_data *cfqd = cic->key; 1441 struct cfq_queue *cfqq; 1442 unsigned long flags; 1443 1444 if (unlikely(!cfqd)) 1445 return; 1446 1447 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1448 1449 cfqq = cic->cfqq[BLK_RW_ASYNC]; 1450 if (cfqq) { 1451 struct cfq_queue *new_cfqq; 1452 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc, 1453 GFP_ATOMIC); 1454 if (new_cfqq) { 1455 cic->cfqq[BLK_RW_ASYNC] = new_cfqq; 1456 cfq_put_queue(cfqq); 1457 } 1458 } 1459 1460 cfqq = cic->cfqq[BLK_RW_SYNC]; 1461 if (cfqq) 1462 cfq_mark_cfqq_prio_changed(cfqq); 1463 1464 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 1465} 1466 1467static void cfq_ioc_set_ioprio(struct io_context *ioc) 1468{ 1469 call_for_each_cic(ioc, changed_ioprio); 1470 ioc->ioprio_changed = 0; 1471} 1472 1473static struct cfq_queue * 1474cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, 1475 struct io_context *ioc, gfp_t gfp_mask) 1476{ 1477 struct cfq_queue *cfqq, *new_cfqq = NULL; 1478 struct cfq_io_context *cic; 1479 1480retry: 1481 cic = cfq_cic_lookup(cfqd, ioc); 1482 /* cic always exists here */ 1483 cfqq = cic_to_cfqq(cic, is_sync); 1484 1485 if (!cfqq) { 1486 if (new_cfqq) { 1487 cfqq = new_cfqq; 1488 new_cfqq = NULL; 1489 } else if (gfp_mask & __GFP_WAIT) { 1490 /* 1491 * Inform the allocator of the fact that we will 1492 * just repeat this allocation if it fails, to allow 1493 * the allocator to do whatever it needs to attempt to 1494 * free memory. 1495 */ 1496 spin_unlock_irq(cfqd->queue->queue_lock); 1497 new_cfqq = kmem_cache_alloc_node(cfq_pool, 1498 gfp_mask | __GFP_NOFAIL | __GFP_ZERO, 1499 cfqd->queue->node); 1500 spin_lock_irq(cfqd->queue->queue_lock); 1501 goto retry; 1502 } else { 1503 cfqq = kmem_cache_alloc_node(cfq_pool, 1504 gfp_mask | __GFP_ZERO, 1505 cfqd->queue->node); 1506 if (!cfqq) 1507 goto out; 1508 } 1509 1510 RB_CLEAR_NODE(&cfqq->rb_node); 1511 INIT_LIST_HEAD(&cfqq->fifo); 1512 1513 atomic_set(&cfqq->ref, 0); 1514 cfqq->cfqd = cfqd; 1515 1516 cfq_mark_cfqq_prio_changed(cfqq); 1517 1518 cfq_init_prio_data(cfqq, ioc); 1519 1520 if (is_sync) { 1521 if (!cfq_class_idle(cfqq)) 1522 cfq_mark_cfqq_idle_window(cfqq); 1523 cfq_mark_cfqq_sync(cfqq); 1524 } 1525 cfqq->pid = current->pid; 1526 cfq_log_cfqq(cfqd, cfqq, "alloced"); 1527 } 1528 1529 if (new_cfqq) 1530 kmem_cache_free(cfq_pool, new_cfqq); 1531 1532out: 1533 WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); 1534 return cfqq; 1535} 1536 1537static struct cfq_queue ** 1538cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) 1539{ 1540 switch (ioprio_class) { 1541 case IOPRIO_CLASS_RT: 1542 return &cfqd->async_cfqq[0][ioprio]; 1543 case IOPRIO_CLASS_BE: 1544 return &cfqd->async_cfqq[1][ioprio]; 1545 case IOPRIO_CLASS_IDLE: 1546 return &cfqd->async_idle_cfqq; 1547 default: 1548 BUG(); 1549 } 1550} 1551 1552static struct cfq_queue * 1553cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc, 1554 gfp_t gfp_mask) 1555{ 1556 const int ioprio = task_ioprio(ioc); 1557 const int ioprio_class = task_ioprio_class(ioc); 1558 struct cfq_queue **async_cfqq = NULL; 1559 struct cfq_queue *cfqq = NULL; 1560 1561 if (!is_sync) { 1562 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); 1563 cfqq = *async_cfqq; 1564 } 1565 1566 if (!cfqq) { 1567 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, 
gfp_mask); 1568 if (!cfqq) 1569 return NULL; 1570 } 1571 1572 /* 1573 * pin the queue now that it's allocated, scheduler exit will prune it 1574 */ 1575 if (!is_sync && !(*async_cfqq)) { 1576 atomic_inc(&cfqq->ref); 1577 *async_cfqq = cfqq; 1578 } 1579 1580 atomic_inc(&cfqq->ref); 1581 return cfqq; 1582} 1583 1584/* 1585 * We drop cfq io contexts lazily, so we may find a dead one. 1586 */ 1587static void 1588cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc, 1589 struct cfq_io_context *cic) 1590{ 1591 unsigned long flags; 1592 1593 WARN_ON(!list_empty(&cic->queue_list)); 1594 1595 spin_lock_irqsave(&ioc->lock, flags); 1596 1597 BUG_ON(ioc->ioc_data == cic); 1598 1599 radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd); 1600 hlist_del_rcu(&cic->cic_list); 1601 spin_unlock_irqrestore(&ioc->lock, flags); 1602 1603 cfq_cic_free(cic); 1604} 1605 1606static struct cfq_io_context * 1607cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) 1608{ 1609 struct cfq_io_context *cic; 1610 unsigned long flags; 1611 void *k; 1612 1613 if (unlikely(!ioc)) 1614 return NULL; 1615 1616 rcu_read_lock(); 1617 1618 /* 1619 * we maintain a last-hit cache, to avoid browsing over the tree 1620 */ 1621 cic = rcu_dereference(ioc->ioc_data); 1622 if (cic && cic->key == cfqd) { 1623 rcu_read_unlock(); 1624 return cic; 1625 } 1626 1627 do { 1628 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd); 1629 rcu_read_unlock(); 1630 if (!cic) 1631 break; 1632 /* ->key must be copied to avoid race with cfq_exit_queue() */ 1633 k = cic->key; 1634 if (unlikely(!k)) { 1635 cfq_drop_dead_cic(cfqd, ioc, cic); 1636 rcu_read_lock(); 1637 continue; 1638 } 1639 1640 spin_lock_irqsave(&ioc->lock, flags); 1641 rcu_assign_pointer(ioc->ioc_data, cic); 1642 spin_unlock_irqrestore(&ioc->lock, flags); 1643 break; 1644 } while (1); 1645 1646 return cic; 1647} 1648 1649/* 1650 * Add cic into ioc, using cfqd as the search key. This enables us to lookup 1651 * the process specific cfq io context when entered from the block layer. 1652 * Also adds the cic to a per-cfqd list, used when this queue is removed. 1653 */ 1654static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, 1655 struct cfq_io_context *cic, gfp_t gfp_mask) 1656{ 1657 unsigned long flags; 1658 int ret; 1659 1660 ret = radix_tree_preload(gfp_mask); 1661 if (!ret) { 1662 cic->ioc = ioc; 1663 cic->key = cfqd; 1664 1665 spin_lock_irqsave(&ioc->lock, flags); 1666 ret = radix_tree_insert(&ioc->radix_root, 1667 (unsigned long) cfqd, cic); 1668 if (!ret) 1669 hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list); 1670 spin_unlock_irqrestore(&ioc->lock, flags); 1671 1672 radix_tree_preload_end(); 1673 1674 if (!ret) { 1675 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 1676 list_add(&cic->queue_list, &cfqd->cic_list); 1677 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 1678 } 1679 } 1680 1681 if (ret) 1682 printk(KERN_ERR "cfq: cic link failed!\n"); 1683 1684 return ret; 1685} 1686 1687/* 1688 * Setup general io context and cfq io context. There can be several cfq 1689 * io contexts per general io context, if this process is doing io to more 1690 * than one device managed by cfq. 
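 * The fast path is a plain cfq_cic_lookup(); only if no cic exists yet
 * for this ioc/cfqd pair do we allocate one and link it with
 * cfq_cic_link(). Any pending ioprio change is applied before the cic
 * is returned.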
1691 */ 1692static struct cfq_io_context * 1693cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) 1694{ 1695 struct io_context *ioc = NULL; 1696 struct cfq_io_context *cic; 1697 1698 might_sleep_if(gfp_mask & __GFP_WAIT); 1699 1700 ioc = get_io_context(gfp_mask, cfqd->queue->node); 1701 if (!ioc) 1702 return NULL; 1703 1704 cic = cfq_cic_lookup(cfqd, ioc); 1705 if (cic) 1706 goto out; 1707 1708 cic = cfq_alloc_io_context(cfqd, gfp_mask); 1709 if (cic == NULL) 1710 goto err; 1711 1712 if (cfq_cic_link(cfqd, ioc, cic, gfp_mask)) 1713 goto err_free; 1714 1715out: 1716 smp_read_barrier_depends(); 1717 if (unlikely(ioc->ioprio_changed)) 1718 cfq_ioc_set_ioprio(ioc); 1719 1720 return cic; 1721err_free: 1722 cfq_cic_free(cic); 1723err: 1724 put_io_context(ioc); 1725 return NULL; 1726} 1727 1728static void 1729cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) 1730{ 1731 unsigned long elapsed = jiffies - cic->last_end_request; 1732 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); 1733 1734 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; 1735 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; 1736 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; 1737} 1738 1739static void 1740cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic, 1741 struct request *rq) 1742{ 1743 sector_t sdist; 1744 u64 total; 1745 1746 if (cic->last_request_pos < rq->sector) 1747 sdist = rq->sector - cic->last_request_pos; 1748 else 1749 sdist = cic->last_request_pos - rq->sector; 1750 1751 /* 1752 * Don't allow the seek distance to get too large from the 1753 * odd fragment, pagein, etc 1754 */ 1755 if (cic->seek_samples <= 60) /* second&third seek */ 1756 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024); 1757 else 1758 sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64); 1759 1760 cic->seek_samples = (7*cic->seek_samples + 256) / 8; 1761 cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8; 1762 total = cic->seek_total + (cic->seek_samples/2); 1763 do_div(total, cic->seek_samples); 1764 cic->seek_mean = (sector_t)total; 1765} 1766 1767/* 1768 * Disable idle window if the process thinks too long or seeks so much that 1769 * it doesn't matter 1770 */ 1771static void 1772cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1773 struct cfq_io_context *cic) 1774{ 1775 int old_idle, enable_idle; 1776 1777 /* 1778 * Don't idle for async or idle io prio class 1779 */ 1780 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq)) 1781 return; 1782 1783 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); 1784 1785 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || 1786 (cfqd->hw_tag && CIC_SEEKY(cic))) 1787 enable_idle = 0; 1788 else if (sample_valid(cic->ttime_samples)) { 1789 if (cic->ttime_mean > cfqd->cfq_slice_idle) 1790 enable_idle = 0; 1791 else 1792 enable_idle = 1; 1793 } 1794 1795 if (old_idle != enable_idle) { 1796 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); 1797 if (enable_idle) 1798 cfq_mark_cfqq_idle_window(cfqq); 1799 else 1800 cfq_clear_cfqq_idle_window(cfqq); 1801 } 1802} 1803 1804/* 1805 * Check if new_cfqq should preempt the currently active queue. Return 0 for 1806 * no or if we aren't sure, a 1 will cause a preempt. 
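 * Roughly: an expired slice is always preempted, idle-class newcomers
 * never preempt, sync beats async, metadata beats plain IO, RT beats
 * non-RT, and a request that lands close to the current head position
 * may preempt a queue we are merely idling on.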
1807 */ 1808static int 1809cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, 1810 struct request *rq) 1811{ 1812 struct cfq_queue *cfqq; 1813 1814 cfqq = cfqd->active_queue; 1815 if (!cfqq) 1816 return 0; 1817 1818 if (cfq_slice_used(cfqq)) 1819 return 1; 1820 1821 if (cfq_class_idle(new_cfqq)) 1822 return 0; 1823 1824 if (cfq_class_idle(cfqq)) 1825 return 1; 1826 1827 /* 1828 * if the new request is sync, but the currently running queue is 1829 * not, let the sync request have priority. 1830 */ 1831 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) 1832 return 1; 1833 1834 /* 1835 * So both queues are sync. Let the new request get disk time if 1836 * it's a metadata request and the current queue is doing regular IO. 1837 */ 1838 if (rq_is_meta(rq) && !cfqq->meta_pending) 1839 return 1; 1840 1841 /* 1842 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. 1843 */ 1844 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) 1845 return 1; 1846 1847 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) 1848 return 0; 1849 1850 /* 1851 * if this request is as-good as one we would expect from the 1852 * current cfqq, let it preempt 1853 */ 1854 if (cfq_rq_close(cfqd, rq)) 1855 return 1; 1856 1857 return 0; 1858} 1859 1860/* 1861 * cfqq preempts the active queue. if we allowed preempt with no slice left, 1862 * let it have half of its nominal slice. 1863 */ 1864static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1865{ 1866 cfq_log_cfqq(cfqd, cfqq, "preempt"); 1867 cfq_slice_expired(cfqd, 1); 1868 1869 /* 1870 * Put the new queue at the front of the of the current list, 1871 * so we know that it will be selected next. 1872 */ 1873 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 1874 1875 cfq_service_tree_add(cfqd, cfqq, 1); 1876 1877 cfqq->slice_end = 0; 1878 cfq_mark_cfqq_slice_new(cfqq); 1879} 1880 1881/* 1882 * Called when a new fs request (rq) is added (to cfqq). Check if there's 1883 * something we should do about it 1884 */ 1885static void 1886cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1887 struct request *rq) 1888{ 1889 struct cfq_io_context *cic = RQ_CIC(rq); 1890 1891 cfqd->rq_queued++; 1892 if (rq_is_meta(rq)) 1893 cfqq->meta_pending++; 1894 1895 cfq_update_io_thinktime(cfqd, cic); 1896 cfq_update_io_seektime(cfqd, cic, rq); 1897 cfq_update_idle_window(cfqd, cfqq, cic); 1898 1899 cic->last_request_pos = rq->sector + rq->nr_sectors; 1900 1901 if (cfqq == cfqd->active_queue) { 1902 /* 1903 * Remember that we saw a request from this process, but 1904 * don't start queuing just yet. Otherwise we risk seeing lots 1905 * of tiny requests, because we disrupt the normal plugging 1906 * and merging. 
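		 * Instead the queue is only marked must_dispatch when we were
		 * idling for it; the idle slice timer (or the next unplug) then
		 * kicks the dispatcher without breaking request coalescing.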
1907 */ 1908 if (cfq_cfqq_wait_request(cfqq)) 1909 cfq_mark_cfqq_must_dispatch(cfqq); 1910 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { 1911 /* 1912 * not the active queue - expire current slice if it is 1913 * idle and has expired it's mean thinktime or this new queue 1914 * has some old slice time left and is of higher priority or 1915 * this new queue is RT and the current one is BE 1916 */ 1917 cfq_preempt_queue(cfqd, cfqq); 1918 blk_start_queueing(cfqd->queue); 1919 } 1920} 1921 1922static void cfq_insert_request(struct request_queue *q, struct request *rq) 1923{ 1924 struct cfq_data *cfqd = q->elevator->elevator_data; 1925 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1926 1927 cfq_log_cfqq(cfqd, cfqq, "insert_request"); 1928 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc); 1929 1930 cfq_add_rq_rb(rq); 1931 1932 list_add_tail(&rq->queuelist, &cfqq->fifo); 1933 1934 cfq_rq_enqueued(cfqd, cfqq, rq); 1935} 1936 1937/* 1938 * Update hw_tag based on peak queue depth over 50 samples under 1939 * sufficient load. 1940 */ 1941static void cfq_update_hw_tag(struct cfq_data *cfqd) 1942{ 1943 if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak) 1944 cfqd->rq_in_driver_peak = cfqd->rq_in_driver; 1945 1946 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && 1947 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN) 1948 return; 1949 1950 if (cfqd->hw_tag_samples++ < 50) 1951 return; 1952 1953 if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN) 1954 cfqd->hw_tag = 1; 1955 else 1956 cfqd->hw_tag = 0; 1957 1958 cfqd->hw_tag_samples = 0; 1959 cfqd->rq_in_driver_peak = 0; 1960} 1961 1962static void cfq_completed_request(struct request_queue *q, struct request *rq) 1963{ 1964 struct cfq_queue *cfqq = RQ_CFQQ(rq); 1965 struct cfq_data *cfqd = cfqq->cfqd; 1966 const int sync = rq_is_sync(rq); 1967 unsigned long now; 1968 1969 now = jiffies; 1970 cfq_log_cfqq(cfqd, cfqq, "complete"); 1971 1972 cfq_update_hw_tag(cfqd); 1973 1974 WARN_ON(!cfqd->rq_in_driver); 1975 WARN_ON(!cfqq->dispatched); 1976 cfqd->rq_in_driver--; 1977 cfqq->dispatched--; 1978 1979 if (cfq_cfqq_sync(cfqq)) 1980 cfqd->sync_flight--; 1981 1982 if (!cfq_class_idle(cfqq)) 1983 cfqd->last_end_request = now; 1984 1985 if (sync) 1986 RQ_CIC(rq)->last_end_request = now; 1987 1988 /* 1989 * If this is the active queue, check if it needs to be expired, 1990 * or if we want to idle in case it has no pending requests. 1991 */ 1992 if (cfqd->active_queue == cfqq) { 1993 if (cfq_cfqq_slice_new(cfqq)) { 1994 cfq_set_prio_slice(cfqd, cfqq); 1995 cfq_clear_cfqq_slice_new(cfqq); 1996 } 1997 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) 1998 cfq_slice_expired(cfqd, 1); 1999 else if (sync && !rq_noidle(rq) && 2000 RB_EMPTY_ROOT(&cfqq->sort_list)) { 2001 cfq_arm_slice_timer(cfqd); 2002 } 2003 } 2004 2005 if (!cfqd->rq_in_driver) 2006 cfq_schedule_dispatch(cfqd); 2007} 2008 2009/* 2010 * we temporarily boost lower priority queues if they are holding fs exclusive 2011 * resources. 
they are boosted to normal prio (CLASS_BE/4) 2012 */ 2013static void cfq_prio_boost(struct cfq_queue *cfqq) 2014{ 2015 if (has_fs_excl()) { 2016 /* 2017 * boost idle prio on transactions that would lock out other 2018 * users of the filesystem 2019 */ 2020 if (cfq_class_idle(cfqq)) 2021 cfqq->ioprio_class = IOPRIO_CLASS_BE; 2022 if (cfqq->ioprio > IOPRIO_NORM) 2023 cfqq->ioprio = IOPRIO_NORM; 2024 } else { 2025 /* 2026 * check if we need to unboost the queue 2027 */ 2028 if (cfqq->ioprio_class != cfqq->org_ioprio_class) 2029 cfqq->ioprio_class = cfqq->org_ioprio_class; 2030 if (cfqq->ioprio != cfqq->org_ioprio) 2031 cfqq->ioprio = cfqq->org_ioprio; 2032 } 2033} 2034 2035static inline int __cfq_may_queue(struct cfq_queue *cfqq) 2036{ 2037 if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) && 2038 !cfq_cfqq_must_alloc_slice(cfqq)) { 2039 cfq_mark_cfqq_must_alloc_slice(cfqq); 2040 return ELV_MQUEUE_MUST; 2041 } 2042 2043 return ELV_MQUEUE_MAY; 2044} 2045 2046static int cfq_may_queue(struct request_queue *q, int rw) 2047{ 2048 struct cfq_data *cfqd = q->elevator->elevator_data; 2049 struct task_struct *tsk = current; 2050 struct cfq_io_context *cic; 2051 struct cfq_queue *cfqq; 2052 2053 /* 2054 * don't force setup of a queue from here, as a call to may_queue 2055 * does not necessarily imply that a request actually will be queued. 2056 * so just lookup a possibly existing queue, or return 'may queue' 2057 * if that fails 2058 */ 2059 cic = cfq_cic_lookup(cfqd, tsk->io_context); 2060 if (!cic) 2061 return ELV_MQUEUE_MAY; 2062 2063 cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); 2064 if (cfqq) { 2065 cfq_init_prio_data(cfqq, cic->ioc); 2066 cfq_prio_boost(cfqq); 2067 2068 return __cfq_may_queue(cfqq); 2069 } 2070 2071 return ELV_MQUEUE_MAY; 2072} 2073 2074/* 2075 * queue lock held here 2076 */ 2077static void cfq_put_request(struct request *rq) 2078{ 2079 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2080 2081 if (cfqq) { 2082 const int rw = rq_data_dir(rq); 2083 2084 BUG_ON(!cfqq->allocated[rw]); 2085 cfqq->allocated[rw]--; 2086 2087 put_io_context(RQ_CIC(rq)->ioc); 2088 2089 rq->elevator_private = NULL; 2090 rq->elevator_private2 = NULL; 2091 2092 cfq_put_queue(cfqq); 2093 } 2094} 2095 2096/* 2097 * Allocate cfq data structures associated with this request. 
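 * This looks up (or creates) the cic and cfqq for the submitting task,
 * bumps cfqq->allocated[rw] and the queue reference, and stashes both
 * pointers in rq->elevator_private{,2} for RQ_CIC()/RQ_CFQQ().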
2098 */ 2099static int 2100cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) 2101{ 2102 struct cfq_data *cfqd = q->elevator->elevator_data; 2103 struct cfq_io_context *cic; 2104 const int rw = rq_data_dir(rq); 2105 const int is_sync = rq_is_sync(rq); 2106 struct cfq_queue *cfqq; 2107 unsigned long flags; 2108 2109 might_sleep_if(gfp_mask & __GFP_WAIT); 2110 2111 cic = cfq_get_io_context(cfqd, gfp_mask); 2112 2113 spin_lock_irqsave(q->queue_lock, flags); 2114 2115 if (!cic) 2116 goto queue_fail; 2117 2118 cfqq = cic_to_cfqq(cic, is_sync); 2119 if (!cfqq) { 2120 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); 2121 2122 if (!cfqq) 2123 goto queue_fail; 2124 2125 cic_set_cfqq(cic, cfqq, is_sync); 2126 } 2127 2128 cfqq->allocated[rw]++; 2129 cfq_clear_cfqq_must_alloc(cfqq); 2130 atomic_inc(&cfqq->ref); 2131 2132 spin_unlock_irqrestore(q->queue_lock, flags); 2133 2134 rq->elevator_private = cic; 2135 rq->elevator_private2 = cfqq; 2136 return 0; 2137 2138queue_fail: 2139 if (cic) 2140 put_io_context(cic->ioc); 2141 2142 cfq_schedule_dispatch(cfqd); 2143 spin_unlock_irqrestore(q->queue_lock, flags); 2144 cfq_log(cfqd, "set_request fail"); 2145 return 1; 2146} 2147 2148static void cfq_kick_queue(struct work_struct *work) 2149{ 2150 struct cfq_data *cfqd = 2151 container_of(work, struct cfq_data, unplug_work); 2152 struct request_queue *q = cfqd->queue; 2153 unsigned long flags; 2154 2155 spin_lock_irqsave(q->queue_lock, flags); 2156 blk_start_queueing(q); 2157 spin_unlock_irqrestore(q->queue_lock, flags); 2158} 2159 2160/* 2161 * Timer running if the active_queue is currently idling inside its time slice 2162 */ 2163static void cfq_idle_slice_timer(unsigned long data) 2164{ 2165 struct cfq_data *cfqd = (struct cfq_data *) data; 2166 struct cfq_queue *cfqq; 2167 unsigned long flags; 2168 int timed_out = 1; 2169 2170 cfq_log(cfqd, "idle timer fired"); 2171 2172 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2173 2174 cfqq = cfqd->active_queue; 2175 if (cfqq) { 2176 timed_out = 0; 2177 2178 /* 2179 * We saw a request before the queue expired, let it through 2180 */ 2181 if (cfq_cfqq_must_dispatch(cfqq)) 2182 goto out_kick; 2183 2184 /* 2185 * expired 2186 */ 2187 if (cfq_slice_used(cfqq)) 2188 goto expire; 2189 2190 /* 2191 * only expire and reinvoke request handler, if there are 2192 * other queues with pending requests 2193 */ 2194 if (!cfqd->busy_queues) 2195 goto out_cont; 2196 2197 /* 2198 * not expired and it has a request pending, let it dispatch 2199 */ 2200 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 2201 goto out_kick; 2202 } 2203expire: 2204 cfq_slice_expired(cfqd, timed_out); 2205out_kick: 2206 cfq_schedule_dispatch(cfqd); 2207out_cont: 2208 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2209} 2210 2211static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 2212{ 2213 del_timer_sync(&cfqd->idle_slice_timer); 2214 cancel_work_sync(&cfqd->unplug_work); 2215} 2216 2217static void cfq_put_async_queues(struct cfq_data *cfqd) 2218{ 2219 int i; 2220 2221 for (i = 0; i < IOPRIO_BE_NR; i++) { 2222 if (cfqd->async_cfqq[0][i]) 2223 cfq_put_queue(cfqd->async_cfqq[0][i]); 2224 if (cfqd->async_cfqq[1][i]) 2225 cfq_put_queue(cfqd->async_cfqq[1][i]); 2226 } 2227 2228 if (cfqd->async_idle_cfqq) 2229 cfq_put_queue(cfqd->async_idle_cfqq); 2230} 2231 2232static void cfq_exit_queue(struct elevator_queue *e) 2233{ 2234 struct cfq_data *cfqd = e->elevator_data; 2235 struct request_queue *q = cfqd->queue; 2236 2237 cfq_shutdown_timer_wq(cfqd); 2238 2239 
	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd);
}

static void *cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return NULL;

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->last_end_request = jiffies;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->hw_tag = 1;

	return cfqd;
}

static void cfq_slab_kill(void)
{
	/*
	 * Caller already ensured that pending RCU callbacks are completed,
	 * so we should have no busy allocations at this point.
	 */
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
	struct cfq_data *cfqd = e->elevator_data; \
	unsigned int __data = __VAR; \
	if (__CONV) \
		__data = jiffies_to_msecs(__data); \
	return cfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

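/*
 * For reference, SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0)
 * above expands to roughly:
 *
 *	static ssize_t cfq_quantum_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_quantum;
 *		return cfq_var_show(__data, (page));
 *	}
 *
 * Tunables generated with __CONV == 1 are kept in jiffies internally and
 * converted to milliseconds on the way out; STORE_FUNCTION below performs
 * the reverse conversion on the way in.
 */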
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
	struct cfq_data *cfqd = e->elevator_data; \
	unsigned int __data; \
	int ret = cfq_var_store(&__data, (page), count); \
	if (__data < (MIN)) \
		__data = (MIN); \
	else if (__data > (MAX)) \
		__data = (MAX); \
	if (__CONV) \
		*(__PTR) = msecs_to_jiffies(__data); \
	else \
		*(__PTR) = __data; \
	return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = cfq_merge,
		.elevator_merged_fn = cfq_merged_request,
		.elevator_merge_req_fn = cfq_merged_requests,
		.elevator_allow_merge_fn = cfq_allow_merge,
		.elevator_dispatch_fn = cfq_dispatch_requests,
		.elevator_add_req_fn = cfq_insert_request,
		.elevator_activate_req_fn = cfq_activate_request,
		.elevator_deactivate_req_fn = cfq_deactivate_request,
		.elevator_queue_empty_fn = cfq_queue_empty,
		.elevator_completed_req_fn = cfq_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = cfq_set_request,
		.elevator_put_req_fn = cfq_put_request,
		.elevator_may_queue_fn = cfq_may_queue,
		.elevator_init_fn = cfq_init_queue,
		.elevator_exit_fn = cfq_exit_queue,
		.trim = cfq_free_io_context,
	},
	.elevator_attrs = cfq_attrs,
	.elevator_name = "cfq",
	.elevator_owner = THIS_MODULE,
};

static int __init cfq_init(void)
{
	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	elv_register(&iosched_cfq);

	return 0;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();

	/*
	 * this also protects us from entering cfq_slab_kill() with
	 * pending RCU callbacks
	 */
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(&all_gone);
	cfq_slab_kill();
}

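/*
 * Note on cfq_exit() above: elv_unregister() stops new users of the
 * scheduler, and ioc_gone points at the on-stack completion so that the
 * RCU path freeing the last remaining cfq_io_context can signal it.  Only
 * once ioc_count has drained to zero (either immediately, or after
 * wait_for_completion() returns) is it safe to destroy the slab caches in
 * cfq_slab_kill().
 */
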
module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
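
/*
 * Usage sketch (shell commands, assuming a block device named sda):
 *
 *	# echo cfq > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * The files under queue/iosched/ correspond one-to-one to the cfq_attrs[]
 * table above; the time-based tunables are read and written in
 * milliseconds.
 */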