cfq-iosched.c revision a3cc86c2f00839453d2dbeb46bfc44e885b073db
/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
83 */ 84struct cfq_rb_root { 85 struct rb_root rb; 86 struct rb_node *left; 87 unsigned count; 88 u64 min_vdisktime; 89 struct cfq_ttime ttime; 90}; 91#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \ 92 .ttime = {.last_end_request = jiffies,},} 93 94/* 95 * Per process-grouping structure 96 */ 97struct cfq_queue { 98 /* reference count */ 99 int ref; 100 /* various state flags, see below */ 101 unsigned int flags; 102 /* parent cfq_data */ 103 struct cfq_data *cfqd; 104 /* service_tree member */ 105 struct rb_node rb_node; 106 /* service_tree key */ 107 unsigned long rb_key; 108 /* prio tree member */ 109 struct rb_node p_node; 110 /* prio tree root we belong to, if any */ 111 struct rb_root *p_root; 112 /* sorted list of pending requests */ 113 struct rb_root sort_list; 114 /* if fifo isn't expired, next request to serve */ 115 struct request *next_rq; 116 /* requests queued in sort_list */ 117 int queued[2]; 118 /* currently allocated requests */ 119 int allocated[2]; 120 /* fifo list of requests in sort_list */ 121 struct list_head fifo; 122 123 /* time when queue got scheduled in to dispatch first request. */ 124 unsigned long dispatch_start; 125 unsigned int allocated_slice; 126 unsigned int slice_dispatch; 127 /* time when first request from queue completed and slice started. */ 128 unsigned long slice_start; 129 unsigned long slice_end; 130 long slice_resid; 131 132 /* pending priority requests */ 133 int prio_pending; 134 /* number of requests that are on the dispatch list or inside driver */ 135 int dispatched; 136 137 /* io prio of this group */ 138 unsigned short ioprio, org_ioprio; 139 unsigned short ioprio_class; 140 141 pid_t pid; 142 143 u32 seek_history; 144 sector_t last_request_pos; 145 146 struct cfq_rb_root *service_tree; 147 struct cfq_queue *new_cfqq; 148 struct cfq_group *cfqg; 149 /* Number of sectors dispatched from queue in single dispatch round */ 150 unsigned long nr_sectors; 151}; 152 153/* 154 * First index in the service_trees. 155 * IDLE is handled separately, so it has negative index 156 */ 157enum wl_class_t { 158 BE_WORKLOAD = 0, 159 RT_WORKLOAD = 1, 160 IDLE_WORKLOAD = 2, 161 CFQ_PRIO_NR, 162}; 163 164/* 165 * Second index in the service_trees. 166 */ 167enum wl_type_t { 168 ASYNC_WORKLOAD = 0, 169 SYNC_NOIDLE_WORKLOAD = 1, 170 SYNC_WORKLOAD = 2 171}; 172 173struct cfqg_stats { 174#ifdef CONFIG_CFQ_GROUP_IOSCHED 175 /* total bytes transferred */ 176 struct blkg_rwstat service_bytes; 177 /* total IOs serviced, post merge */ 178 struct blkg_rwstat serviced; 179 /* number of ios merged */ 180 struct blkg_rwstat merged; 181 /* total time spent on device in ns, may not be accurate w/ queueing */ 182 struct blkg_rwstat service_time; 183 /* total time spent waiting in scheduler queue in ns */ 184 struct blkg_rwstat wait_time; 185 /* number of IOs queued up */ 186 struct blkg_rwstat queued; 187 /* total sectors transferred */ 188 struct blkg_stat sectors; 189 /* total disk time and nr sectors dispatched by this group */ 190 struct blkg_stat time; 191#ifdef CONFIG_DEBUG_BLK_CGROUP 192 /* time not charged to this cgroup */ 193 struct blkg_stat unaccounted_time; 194 /* sum of number of ios queued across all samples */ 195 struct blkg_stat avg_queue_size_sum; 196 /* count of samples taken for average */ 197 struct blkg_stat avg_queue_size_samples; 198 /* how many times this group has been removed from service tree */ 199 struct blkg_stat dequeue; 200 /* total time spent waiting for it to be assigned a timeslice. 
	 */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg. This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active. An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to. This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1. The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs. leaf_weight is the weight of
	 * this cfqg against the child cfqgs. For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
280 * Counts are embedded in the cfq_rb_root 281 */ 282 struct cfq_rb_root service_trees[2][3]; 283 struct cfq_rb_root service_tree_idle; 284 285 unsigned long saved_wl_slice; 286 enum wl_type_t saved_wl_type; 287 enum wl_class_t saved_wl_class; 288 289 /* number of requests that are on the dispatch list or inside driver */ 290 int dispatched; 291 struct cfq_ttime ttime; 292 struct cfqg_stats stats; /* stats for this cfqg */ 293 struct cfqg_stats dead_stats; /* stats pushed from dead children */ 294}; 295 296struct cfq_io_cq { 297 struct io_cq icq; /* must be the first member */ 298 struct cfq_queue *cfqq[2]; 299 struct cfq_ttime ttime; 300 int ioprio; /* the current ioprio */ 301#ifdef CONFIG_CFQ_GROUP_IOSCHED 302 uint64_t blkcg_id; /* the current blkcg ID */ 303#endif 304}; 305 306/* 307 * Per block device queue structure 308 */ 309struct cfq_data { 310 struct request_queue *queue; 311 /* Root service tree for cfq_groups */ 312 struct cfq_rb_root grp_service_tree; 313 struct cfq_group *root_group; 314 315 /* 316 * The priority currently being served 317 */ 318 enum wl_class_t serving_wl_class; 319 enum wl_type_t serving_wl_type; 320 unsigned long workload_expires; 321 struct cfq_group *serving_group; 322 323 /* 324 * Each priority tree is sorted by next_request position. These 325 * trees are used when determining if two or more queues are 326 * interleaving requests (see cfq_close_cooperator). 327 */ 328 struct rb_root prio_trees[CFQ_PRIO_LISTS]; 329 330 unsigned int busy_queues; 331 unsigned int busy_sync_queues; 332 333 int rq_in_driver; 334 int rq_in_flight[2]; 335 336 /* 337 * queue-depth detection 338 */ 339 int rq_queued; 340 int hw_tag; 341 /* 342 * hw_tag can be 343 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection) 344 * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth) 345 * 0 => no NCQ 346 */ 347 int hw_tag_est_depth; 348 unsigned int hw_tag_samples; 349 350 /* 351 * idle window management 352 */ 353 struct timer_list idle_slice_timer; 354 struct work_struct unplug_work; 355 356 struct cfq_queue *active_queue; 357 struct cfq_io_cq *active_cic; 358 359 /* 360 * async queue for each priority case 361 */ 362 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; 363 struct cfq_queue *async_idle_cfqq; 364 365 sector_t last_position; 366 367 /* 368 * tunables, see top of file 369 */ 370 unsigned int cfq_quantum; 371 unsigned int cfq_fifo_expire[2]; 372 unsigned int cfq_back_penalty; 373 unsigned int cfq_back_max; 374 unsigned int cfq_slice[2]; 375 unsigned int cfq_slice_async_rq; 376 unsigned int cfq_slice_idle; 377 unsigned int cfq_group_idle; 378 unsigned int cfq_latency; 379 unsigned int cfq_target_latency; 380 381 /* 382 * Fallback dummy cfqq for extreme OOM conditions 383 */ 384 struct cfq_queue oom_cfqq; 385 386 unsigned long last_delayed_sync; 387}; 388 389static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); 390 391static struct cfq_rb_root *st_for(struct cfq_group *cfqg, 392 enum wl_class_t class, 393 enum wl_type_t type) 394{ 395 if (!cfqg) 396 return NULL; 397 398 if (class == IDLE_WORKLOAD) 399 return &cfqg->service_tree_idle; 400 401 return &cfqg->service_trees[class][type]; 402} 403 404enum cfqq_state_flags { 405 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ 406 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ 407 CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */ 408 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ 409 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in 
this slice */ 410 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */ 411 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ 412 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ 413 CFQ_CFQQ_FLAG_sync, /* synchronous queue */ 414 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ 415 CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */ 416 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ 417 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ 418}; 419 420#define CFQ_CFQQ_FNS(name) \ 421static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ 422{ \ 423 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ 424} \ 425static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ 426{ \ 427 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ 428} \ 429static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ 430{ \ 431 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ 432} 433 434CFQ_CFQQ_FNS(on_rr); 435CFQ_CFQQ_FNS(wait_request); 436CFQ_CFQQ_FNS(must_dispatch); 437CFQ_CFQQ_FNS(must_alloc_slice); 438CFQ_CFQQ_FNS(fifo_expire); 439CFQ_CFQQ_FNS(idle_window); 440CFQ_CFQQ_FNS(prio_changed); 441CFQ_CFQQ_FNS(slice_new); 442CFQ_CFQQ_FNS(sync); 443CFQ_CFQQ_FNS(coop); 444CFQ_CFQQ_FNS(split_coop); 445CFQ_CFQQ_FNS(deep); 446CFQ_CFQQ_FNS(wait_busy); 447#undef CFQ_CFQQ_FNS 448 449static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd) 450{ 451 return pd ? container_of(pd, struct cfq_group, pd) : NULL; 452} 453 454static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg) 455{ 456 return pd_to_blkg(&cfqg->pd); 457} 458 459#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) 460 461/* cfqg stats flags */ 462enum cfqg_stats_flags { 463 CFQG_stats_waiting = 0, 464 CFQG_stats_idling, 465 CFQG_stats_empty, 466}; 467 468#define CFQG_FLAG_FNS(name) \ 469static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \ 470{ \ 471 stats->flags |= (1 << CFQG_stats_##name); \ 472} \ 473static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \ 474{ \ 475 stats->flags &= ~(1 << CFQG_stats_##name); \ 476} \ 477static inline int cfqg_stats_##name(struct cfqg_stats *stats) \ 478{ \ 479 return (stats->flags & (1 << CFQG_stats_##name)) != 0; \ 480} \ 481 482CFQG_FLAG_FNS(waiting) 483CFQG_FLAG_FNS(idling) 484CFQG_FLAG_FNS(empty) 485#undef CFQG_FLAG_FNS 486 487/* This should be called with the queue_lock held. */ 488static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats) 489{ 490 unsigned long long now; 491 492 if (!cfqg_stats_waiting(stats)) 493 return; 494 495 now = sched_clock(); 496 if (time_after64(now, stats->start_group_wait_time)) 497 blkg_stat_add(&stats->group_wait_time, 498 now - stats->start_group_wait_time); 499 cfqg_stats_clear_waiting(stats); 500} 501 502/* This should be called with the queue_lock held. */ 503static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, 504 struct cfq_group *curr_cfqg) 505{ 506 struct cfqg_stats *stats = &cfqg->stats; 507 508 if (cfqg_stats_waiting(stats)) 509 return; 510 if (cfqg == curr_cfqg) 511 return; 512 stats->start_group_wait_time = sched_clock(); 513 cfqg_stats_mark_waiting(stats); 514} 515 516/* This should be called with the queue_lock held. 
*/ 517static void cfqg_stats_end_empty_time(struct cfqg_stats *stats) 518{ 519 unsigned long long now; 520 521 if (!cfqg_stats_empty(stats)) 522 return; 523 524 now = sched_clock(); 525 if (time_after64(now, stats->start_empty_time)) 526 blkg_stat_add(&stats->empty_time, 527 now - stats->start_empty_time); 528 cfqg_stats_clear_empty(stats); 529} 530 531static void cfqg_stats_update_dequeue(struct cfq_group *cfqg) 532{ 533 blkg_stat_add(&cfqg->stats.dequeue, 1); 534} 535 536static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) 537{ 538 struct cfqg_stats *stats = &cfqg->stats; 539 540 if (blkg_rwstat_total(&stats->queued)) 541 return; 542 543 /* 544 * group is already marked empty. This can happen if cfqq got new 545 * request in parent group and moved to this group while being added 546 * to service tree. Just ignore the event and move on. 547 */ 548 if (cfqg_stats_empty(stats)) 549 return; 550 551 stats->start_empty_time = sched_clock(); 552 cfqg_stats_mark_empty(stats); 553} 554 555static void cfqg_stats_update_idle_time(struct cfq_group *cfqg) 556{ 557 struct cfqg_stats *stats = &cfqg->stats; 558 559 if (cfqg_stats_idling(stats)) { 560 unsigned long long now = sched_clock(); 561 562 if (time_after64(now, stats->start_idle_time)) 563 blkg_stat_add(&stats->idle_time, 564 now - stats->start_idle_time); 565 cfqg_stats_clear_idling(stats); 566 } 567} 568 569static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) 570{ 571 struct cfqg_stats *stats = &cfqg->stats; 572 573 BUG_ON(cfqg_stats_idling(stats)); 574 575 stats->start_idle_time = sched_clock(); 576 cfqg_stats_mark_idling(stats); 577} 578 579static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) 580{ 581 struct cfqg_stats *stats = &cfqg->stats; 582 583 blkg_stat_add(&stats->avg_queue_size_sum, 584 blkg_rwstat_total(&stats->queued)); 585 blkg_stat_add(&stats->avg_queue_size_samples, 1); 586 cfqg_stats_update_group_wait_time(stats); 587} 588 589#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */ 590 591static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { } 592static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { } 593static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { } 594static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { } 595static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { } 596static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { } 597static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { } 598 599#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */ 600 601#ifdef CONFIG_CFQ_GROUP_IOSCHED 602 603static struct blkcg_policy blkcg_policy_cfq; 604 605static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg) 606{ 607 return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq)); 608} 609 610static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) 611{ 612 struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent; 613 614 return pblkg ? blkg_to_cfqg(pblkg) : NULL; 615} 616 617static inline void cfqg_get(struct cfq_group *cfqg) 618{ 619 return blkg_get(cfqg_to_blkg(cfqg)); 620} 621 622static inline void cfqg_put(struct cfq_group *cfqg) 623{ 624 return blkg_put(cfqg_to_blkg(cfqg)); 625} 626 627#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) 
do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			__pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
	blkg_rwstat_merge(&to->serviced, &from->serviced);
	blkg_rwstat_merge(&to->merged, &from->merged);
	blkg_rwstat_merge(&to->service_time, &from->service_time);
	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
	blkg_stat_merge(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
	blkg_stat_merge(&to->dequeue, &from->dequeue);
	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_merge(&to->idle_time, &from->idle_time);
	blkg_stat_merge(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
	cfqg_stats_reset(&cfqg->stats);
	cfqg_stats_reset(&cfqg->dead_stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
			uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}
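
/*
 * Illustrative note (editor's addition, not in the original source; assumes
 * HZ=1000 and the default tunables above): cfq_slice_idle defaults to HZ/125,
 * i.e. 8 jiffies or 8ms, so once more than 80 thinktime samples have been
 * collected, a queue whose mean thinktime exceeds 8ms is reported as having
 * a "big" thinktime and callers use that to avoid idling on it.
 */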

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck.
In such cases 813 * switch to start providing fairness in terms of number of IOs. 814 */ 815 if (!cfqd->cfq_slice_idle && cfqd->hw_tag) 816 return true; 817 else 818 return false; 819} 820 821static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq) 822{ 823 if (cfq_class_idle(cfqq)) 824 return IDLE_WORKLOAD; 825 if (cfq_class_rt(cfqq)) 826 return RT_WORKLOAD; 827 return BE_WORKLOAD; 828} 829 830 831static enum wl_type_t cfqq_type(struct cfq_queue *cfqq) 832{ 833 if (!cfq_cfqq_sync(cfqq)) 834 return ASYNC_WORKLOAD; 835 if (!cfq_cfqq_idle_window(cfqq)) 836 return SYNC_NOIDLE_WORKLOAD; 837 return SYNC_WORKLOAD; 838} 839 840static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class, 841 struct cfq_data *cfqd, 842 struct cfq_group *cfqg) 843{ 844 if (wl_class == IDLE_WORKLOAD) 845 return cfqg->service_tree_idle.count; 846 847 return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count + 848 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count + 849 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count; 850} 851 852static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, 853 struct cfq_group *cfqg) 854{ 855 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + 856 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; 857} 858 859static void cfq_dispatch_insert(struct request_queue *, struct request *); 860static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync, 861 struct cfq_io_cq *cic, struct bio *bio, 862 gfp_t gfp_mask); 863 864static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq) 865{ 866 /* cic->icq is the first member, %NULL will convert to %NULL */ 867 return container_of(icq, struct cfq_io_cq, icq); 868} 869 870static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd, 871 struct io_context *ioc) 872{ 873 if (ioc) 874 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue)); 875 return NULL; 876} 877 878static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync) 879{ 880 return cic->cfqq[is_sync]; 881} 882 883static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq, 884 bool is_sync) 885{ 886 cic->cfqq[is_sync] = cfqq; 887} 888 889static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic) 890{ 891 return cic->icq.q->elevator->elevator_data; 892} 893 894/* 895 * We regard a request as SYNC, if it's either a read or has the SYNC bit 896 * set (in which case it could also be direct WRITE). 897 */ 898static inline bool cfq_bio_sync(struct bio *bio) 899{ 900 return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); 901} 902 903/* 904 * scheduler run of queue, if there are requests pending and no one in the 905 * driver that will restart queueing 906 */ 907static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) 908{ 909 if (cfqd->busy_queues) { 910 cfq_log(cfqd, "schedule dispatch"); 911 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); 912 } 913} 914 915/* 916 * Scale schedule slice based on io priority. Use the sync time slice only 917 * if a queue is marked sync and has sync io queued. A sync queue with async 918 * io only, should not get full sync slice length. 
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
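
/*
 * Illustrative example (editor's addition, not in the original source;
 * assumes HZ=1000 and the default sync slice of HZ/10): base_slice is 100ms
 * and base_slice/CFQ_SLICE_SCALE is 20ms, so the formula above yields 180ms
 * for ioprio 0, 100ms for the default ioprio 4 and 40ms for ioprio 7 - each
 * priority step is worth one fifth of the base slice.
 */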

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1]. The
 * scaling is inversely proportional.
 *
 * scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(unsigned long charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	do_div(c, vfraction);
	return c;
}

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * the average is updated with a formula that gives more weight to higher
 * numbers, so that it quickly follows sudden increases and decreases slowly.
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ?
rq1 : rq2; 1092 1093 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO) 1094 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2; 1095 1096 s1 = blk_rq_pos(rq1); 1097 s2 = blk_rq_pos(rq2); 1098 1099 /* 1100 * by definition, 1KiB is 2 sectors 1101 */ 1102 back_max = cfqd->cfq_back_max * 2; 1103 1104 /* 1105 * Strict one way elevator _except_ in the case where we allow 1106 * short backward seeks which are biased as twice the cost of a 1107 * similar forward seek. 1108 */ 1109 if (s1 >= last) 1110 d1 = s1 - last; 1111 else if (s1 + back_max >= last) 1112 d1 = (last - s1) * cfqd->cfq_back_penalty; 1113 else 1114 wrap |= CFQ_RQ1_WRAP; 1115 1116 if (s2 >= last) 1117 d2 = s2 - last; 1118 else if (s2 + back_max >= last) 1119 d2 = (last - s2) * cfqd->cfq_back_penalty; 1120 else 1121 wrap |= CFQ_RQ2_WRAP; 1122 1123 /* Found required data */ 1124 1125 /* 1126 * By doing switch() on the bit mask "wrap" we avoid having to 1127 * check two variables for all permutations: --> faster! 1128 */ 1129 switch (wrap) { 1130 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ 1131 if (d1 < d2) 1132 return rq1; 1133 else if (d2 < d1) 1134 return rq2; 1135 else { 1136 if (s1 >= s2) 1137 return rq1; 1138 else 1139 return rq2; 1140 } 1141 1142 case CFQ_RQ2_WRAP: 1143 return rq1; 1144 case CFQ_RQ1_WRAP: 1145 return rq2; 1146 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */ 1147 default: 1148 /* 1149 * Since both rqs are wrapped, 1150 * start with the one that's further behind head 1151 * (--> only *one* back seek required), 1152 * since back seek takes more time than forward. 1153 */ 1154 if (s1 <= s2) 1155 return rq1; 1156 else 1157 return rq2; 1158 } 1159} 1160 1161/* 1162 * The below is leftmost cache rbtree addon 1163 */ 1164static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) 1165{ 1166 /* Service tree is empty */ 1167 if (!root->count) 1168 return NULL; 1169 1170 if (!root->left) 1171 root->left = rb_first(&root->rb); 1172 1173 if (root->left) 1174 return rb_entry(root->left, struct cfq_queue, rb_node); 1175 1176 return NULL; 1177} 1178 1179static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root) 1180{ 1181 if (!root->left) 1182 root->left = rb_first(&root->rb); 1183 1184 if (root->left) 1185 return rb_entry_cfqg(root->left); 1186 1187 return NULL; 1188} 1189 1190static void rb_erase_init(struct rb_node *n, struct rb_root *root) 1191{ 1192 rb_erase(n, root); 1193 RB_CLEAR_NODE(n); 1194} 1195 1196static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) 1197{ 1198 if (root->left == n) 1199 root->left = NULL; 1200 rb_erase_init(n, &root->rb); 1201 --root->count; 1202} 1203 1204/* 1205 * would be nice to take fifo expire time into account as well 1206 */ 1207static struct request * 1208cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1209 struct request *last) 1210{ 1211 struct rb_node *rbnext = rb_next(&last->rb_node); 1212 struct rb_node *rbprev = rb_prev(&last->rb_node); 1213 struct request *next = NULL, *prev = NULL; 1214 1215 BUG_ON(RB_EMPTY_NODE(&last->rb_node)); 1216 1217 if (rbprev) 1218 prev = rb_entry_rq(rbprev); 1219 1220 if (rbnext) 1221 next = rb_entry_rq(rbnext); 1222 else { 1223 rbnext = rb_first(&cfqq->sort_list); 1224 if (rbnext && rbnext != &last->rb_node) 1225 next = rb_entry_rq(rbnext); 1226 } 1227 1228 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); 1229} 1230 1231static unsigned long cfq_slice_offset(struct cfq_data *cfqd, 1232 struct cfq_queue *cfqq) 1233{ 1234 /* 1235 * just an approximation, should be ok. 
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	if (cfqg->new_weight) {
		cfqg->weight = cfqg->new_weight;
		cfqg->new_weight = 0;
	}

	if (cfqg->new_leaf_weight) {
		cfqg->leaf_weight = cfqg->new_leaf_weight;
		cfqg->new_leaf_weight = 0;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* start with 1 */
	struct cfq_group *pos = cfqg;
	struct cfq_group *parent;
	bool propagate;

	/* add to the service tree */
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);

	/*
	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
	 * entitled to. vfraction is calculated by walking the tree
	 * towards the root calculating the fraction it has at each level.
	 * The compounded ratio is how much vfraction @cfqg owns.
	 *
	 * Start with the proportion tasks in this cfqg has against active
	 * children cfqgs - its leaf_weight against children_weight.
	 */
	propagate = !pos->nr_active++;
	pos->children_weight += pos->leaf_weight;
	vfr = vfr * pos->leaf_weight / pos->children_weight;

	/*
	 * Compound ->weight walking up the tree. Both activation and
	 * vfraction calculation are done in the same loop. Propagation
	 * stops once an already activated node is met. vfraction
	 * calculation should always continue to the root.
	 */
	while ((parent = cfqg_parent(pos))) {
		if (propagate) {
			propagate = !parent->nr_active++;
			parent->children_weight += pos->weight;
		}
		vfr = vfr * pos->weight / parent->children_weight;
		pos = parent;
	}

	cfqg->vfraction = max_t(unsigned, vfr, 1);
}
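
/*
 * Illustrative example (editor's addition, not in the original source;
 * assumes the default weight and leaf_weight of 500): consider a leaf cfqg
 * with no active child groups under a root group that has no active queues
 * of its own. Activating the cfqg gives vfr = 500/500 = 1 at its own level;
 * if it is the only active group under the root, the root-level term is also
 * 500/500 and vfraction ends up as 1 << CFQ_SERVICE_SHIFT, i.e. the whole
 * device. If an equally weighted sibling is also active, the root-level
 * term becomes 500/1000 and each sibling is entitled to half the service.
 */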

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose everything if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct cfq_group *pos = cfqg;
	bool propagate;

	/*
	 * Undo activation from cfq_group_service_tree_add(). Deactivate
	 * @cfqg and propagate deactivation upwards.
	 */
	propagate = !--pos->nr_active;
	pos->children_weight -= pos->leaf_weight;

	while (propagate) {
		struct cfq_group *parent = cfqg_parent(pos);

		/* @pos has 0 nr_active at this point */
		WARN_ON_ONCE(pos->children_weight);
		pos->vfraction = 0;

		if (!parent)
			break;

		propagate = !--parent->nr_active;
		parent->children_weight -= pos->weight;
		pos = parent;
	}

	/* remove from the service tree */
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_wl_slice = 0;
	cfqg_stats_update_dequeue(cfqg);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;
	unsigned int vfr;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/*
	 * Can't update vdisktime while on service tree and cfqg->vfraction
	 * is valid only while on it. Cache vfr, leave the service tree,
	 * update vdisktime and go back on.
The re-addition to the tree 1465 * will also update the weights as necessary. 1466 */ 1467 vfr = cfqg->vfraction; 1468 cfq_group_service_tree_del(st, cfqg); 1469 cfqg->vdisktime += cfqg_scale_charge(charge, vfr); 1470 cfq_group_service_tree_add(st, cfqg); 1471 1472 /* This group is being expired. Save the context */ 1473 if (time_after(cfqd->workload_expires, jiffies)) { 1474 cfqg->saved_wl_slice = cfqd->workload_expires 1475 - jiffies; 1476 cfqg->saved_wl_type = cfqd->serving_wl_type; 1477 cfqg->saved_wl_class = cfqd->serving_wl_class; 1478 } else 1479 cfqg->saved_wl_slice = 0; 1480 1481 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, 1482 st->min_vdisktime); 1483 cfq_log_cfqq(cfqq->cfqd, cfqq, 1484 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu", 1485 used_sl, cfqq->slice_dispatch, charge, 1486 iops_mode(cfqd), cfqq->nr_sectors); 1487 cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl); 1488 cfqg_stats_set_start_empty_time(cfqg); 1489} 1490 1491/** 1492 * cfq_init_cfqg_base - initialize base part of a cfq_group 1493 * @cfqg: cfq_group to initialize 1494 * 1495 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED 1496 * is enabled or not. 1497 */ 1498static void cfq_init_cfqg_base(struct cfq_group *cfqg) 1499{ 1500 struct cfq_rb_root *st; 1501 int i, j; 1502 1503 for_each_cfqg_st(cfqg, i, j, st) 1504 *st = CFQ_RB_ROOT; 1505 RB_CLEAR_NODE(&cfqg->rb_node); 1506 1507 cfqg->ttime.last_end_request = jiffies; 1508} 1509 1510#ifdef CONFIG_CFQ_GROUP_IOSCHED 1511static void cfq_pd_init(struct blkcg_gq *blkg) 1512{ 1513 struct cfq_group *cfqg = blkg_to_cfqg(blkg); 1514 1515 cfq_init_cfqg_base(cfqg); 1516 cfqg->weight = blkg->blkcg->cfq_weight; 1517 cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight; 1518} 1519 1520static void cfq_pd_offline(struct blkcg_gq *blkg) 1521{ 1522 /* 1523 * @blkg is going offline and will be ignored by 1524 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so 1525 * that they don't get lost. If IOs complete after this point, the 1526 * stats for them will be lost. Oh well... 1527 */ 1528 cfqg_stats_xfer_dead(blkg_to_cfqg(blkg)); 1529} 1530 1531/* offset delta from cfqg->stats to cfqg->dead_stats */ 1532static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) - 1533 offsetof(struct cfq_group, stats); 1534 1535/* to be used by recursive prfill, sums live and dead stats recursively */ 1536static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off) 1537{ 1538 u64 sum = 0; 1539 1540 sum += blkg_stat_recursive_sum(pd, off); 1541 sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta); 1542 return sum; 1543} 1544 1545/* to be used by recursive prfill, sums live and dead rwstats recursively */ 1546static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, 1547 int off) 1548{ 1549 struct blkg_rwstat a, b; 1550 1551 a = blkg_rwstat_recursive_sum(pd, off); 1552 b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta); 1553 blkg_rwstat_merge(&a, &b); 1554 return a; 1555} 1556 1557static void cfq_pd_reset_stats(struct blkcg_gq *blkg) 1558{ 1559 struct cfq_group *cfqg = blkg_to_cfqg(blkg); 1560 1561 cfqg_stats_reset(&cfqg->stats); 1562 cfqg_stats_reset(&cfqg->dead_stats); 1563} 1564 1565/* 1566 * Search for the cfq group current task belongs to. request_queue lock must 1567 * be held. 
1568 */ 1569static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, 1570 struct blkcg *blkcg) 1571{ 1572 struct request_queue *q = cfqd->queue; 1573 struct cfq_group *cfqg = NULL; 1574 1575 /* avoid lookup for the common case where there's no blkcg */ 1576 if (blkcg == &blkcg_root) { 1577 cfqg = cfqd->root_group; 1578 } else { 1579 struct blkcg_gq *blkg; 1580 1581 blkg = blkg_lookup_create(blkcg, q); 1582 if (!IS_ERR(blkg)) 1583 cfqg = blkg_to_cfqg(blkg); 1584 } 1585 1586 return cfqg; 1587} 1588 1589static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) 1590{ 1591 /* Currently, all async queues are mapped to root group */ 1592 if (!cfq_cfqq_sync(cfqq)) 1593 cfqg = cfqq->cfqd->root_group; 1594 1595 cfqq->cfqg = cfqg; 1596 /* cfqq reference on cfqg */ 1597 cfqg_get(cfqg); 1598} 1599 1600static u64 cfqg_prfill_weight_device(struct seq_file *sf, 1601 struct blkg_policy_data *pd, int off) 1602{ 1603 struct cfq_group *cfqg = pd_to_cfqg(pd); 1604 1605 if (!cfqg->dev_weight) 1606 return 0; 1607 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight); 1608} 1609 1610static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft, 1611 struct seq_file *sf) 1612{ 1613 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), 1614 cfqg_prfill_weight_device, &blkcg_policy_cfq, 0, 1615 false); 1616 return 0; 1617} 1618 1619static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf, 1620 struct blkg_policy_data *pd, int off) 1621{ 1622 struct cfq_group *cfqg = pd_to_cfqg(pd); 1623 1624 if (!cfqg->dev_leaf_weight) 1625 return 0; 1626 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight); 1627} 1628 1629static int cfqg_print_leaf_weight_device(struct cgroup *cgrp, 1630 struct cftype *cft, 1631 struct seq_file *sf) 1632{ 1633 blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), 1634 cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0, 1635 false); 1636 return 0; 1637} 1638 1639static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft, 1640 struct seq_file *sf) 1641{ 1642 seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight); 1643 return 0; 1644} 1645 1646static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft, 1647 struct seq_file *sf) 1648{ 1649 seq_printf(sf, "%u\n", 1650 cgroup_to_blkcg(cgrp)->cfq_leaf_weight); 1651 return 0; 1652} 1653 1654static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft, 1655 const char *buf, bool is_leaf_weight) 1656{ 1657 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1658 struct blkg_conf_ctx ctx; 1659 struct cfq_group *cfqg; 1660 int ret; 1661 1662 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx); 1663 if (ret) 1664 return ret; 1665 1666 ret = -EINVAL; 1667 cfqg = blkg_to_cfqg(ctx.blkg); 1668 if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) { 1669 if (!is_leaf_weight) { 1670 cfqg->dev_weight = ctx.v; 1671 cfqg->new_weight = ctx.v ?: blkcg->cfq_weight; 1672 } else { 1673 cfqg->dev_leaf_weight = ctx.v; 1674 cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight; 1675 } 1676 ret = 0; 1677 } 1678 1679 blkg_conf_finish(&ctx); 1680 return ret; 1681} 1682 1683static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft, 1684 const char *buf) 1685{ 1686 return __cfqg_set_weight_device(cgrp, cft, buf, false); 1687} 1688 1689static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft, 1690 const char *buf) 1691{ 1692 return __cfqg_set_weight_device(cgrp, cft, buf, true); 1693} 1694 1695static int __cfq_set_weight(struct cgroup *cgrp, struct 
cftype *cft, u64 val, 1696 bool is_leaf_weight) 1697{ 1698 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1699 struct blkcg_gq *blkg; 1700 struct hlist_node *n; 1701 1702 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX) 1703 return -EINVAL; 1704 1705 spin_lock_irq(&blkcg->lock); 1706 1707 if (!is_leaf_weight) 1708 blkcg->cfq_weight = val; 1709 else 1710 blkcg->cfq_leaf_weight = val; 1711 1712 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { 1713 struct cfq_group *cfqg = blkg_to_cfqg(blkg); 1714 1715 if (!cfqg) 1716 continue; 1717 1718 if (!is_leaf_weight) { 1719 if (!cfqg->dev_weight) 1720 cfqg->new_weight = blkcg->cfq_weight; 1721 } else { 1722 if (!cfqg->dev_leaf_weight) 1723 cfqg->new_leaf_weight = blkcg->cfq_leaf_weight; 1724 } 1725 } 1726 1727 spin_unlock_irq(&blkcg->lock); 1728 return 0; 1729} 1730 1731static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val) 1732{ 1733 return __cfq_set_weight(cgrp, cft, val, false); 1734} 1735 1736static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val) 1737{ 1738 return __cfq_set_weight(cgrp, cft, val, true); 1739} 1740 1741static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft, 1742 struct seq_file *sf) 1743{ 1744 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1745 1746 blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq, 1747 cft->private, false); 1748 return 0; 1749} 1750 1751static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft, 1752 struct seq_file *sf) 1753{ 1754 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1755 1756 blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq, 1757 cft->private, true); 1758 return 0; 1759} 1760 1761static u64 cfqg_prfill_stat_recursive(struct seq_file *sf, 1762 struct blkg_policy_data *pd, int off) 1763{ 1764 u64 sum = cfqg_stat_pd_recursive_sum(pd, off); 1765 1766 return __blkg_prfill_u64(sf, pd, sum); 1767} 1768 1769static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf, 1770 struct blkg_policy_data *pd, int off) 1771{ 1772 struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off); 1773 1774 return __blkg_prfill_rwstat(sf, pd, &sum); 1775} 1776 1777static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft, 1778 struct seq_file *sf) 1779{ 1780 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1781 1782 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive, 1783 &blkcg_policy_cfq, cft->private, false); 1784 return 0; 1785} 1786 1787static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft, 1788 struct seq_file *sf) 1789{ 1790 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1791 1792 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive, 1793 &blkcg_policy_cfq, cft->private, true); 1794 return 0; 1795} 1796 1797#ifdef CONFIG_DEBUG_BLK_CGROUP 1798static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, 1799 struct blkg_policy_data *pd, int off) 1800{ 1801 struct cfq_group *cfqg = pd_to_cfqg(pd); 1802 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples); 1803 u64 v = 0; 1804 1805 if (samples) { 1806 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum); 1807 do_div(v, samples); 1808 } 1809 __blkg_prfill_u64(sf, pd, v); 1810 return 0; 1811} 1812 1813/* print avg_queue_size */ 1814static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft, 1815 struct seq_file *sf) 1816{ 1817 struct blkcg *blkcg = cgroup_to_blkcg(cgrp); 1818 1819 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size, 1820 &blkcg_policy_cfq, 0, false); 1821 return 0; 
1822} 1823#endif /* CONFIG_DEBUG_BLK_CGROUP */ 1824 1825static struct cftype cfq_blkcg_files[] = { 1826 /* on root, weight is mapped to leaf_weight */ 1827 { 1828 .name = "weight_device", 1829 .flags = CFTYPE_ONLY_ON_ROOT, 1830 .read_seq_string = cfqg_print_leaf_weight_device, 1831 .write_string = cfqg_set_leaf_weight_device, 1832 .max_write_len = 256, 1833 }, 1834 { 1835 .name = "weight", 1836 .flags = CFTYPE_ONLY_ON_ROOT, 1837 .read_seq_string = cfq_print_leaf_weight, 1838 .write_u64 = cfq_set_leaf_weight, 1839 }, 1840 1841 /* no such mapping necessary for !roots */ 1842 { 1843 .name = "weight_device", 1844 .flags = CFTYPE_NOT_ON_ROOT, 1845 .read_seq_string = cfqg_print_weight_device, 1846 .write_string = cfqg_set_weight_device, 1847 .max_write_len = 256, 1848 }, 1849 { 1850 .name = "weight", 1851 .flags = CFTYPE_NOT_ON_ROOT, 1852 .read_seq_string = cfq_print_weight, 1853 .write_u64 = cfq_set_weight, 1854 }, 1855 1856 { 1857 .name = "leaf_weight_device", 1858 .read_seq_string = cfqg_print_leaf_weight_device, 1859 .write_string = cfqg_set_leaf_weight_device, 1860 .max_write_len = 256, 1861 }, 1862 { 1863 .name = "leaf_weight", 1864 .read_seq_string = cfq_print_leaf_weight, 1865 .write_u64 = cfq_set_leaf_weight, 1866 }, 1867 1868 /* statistics, covers only the tasks in the cfqg */ 1869 { 1870 .name = "time", 1871 .private = offsetof(struct cfq_group, stats.time), 1872 .read_seq_string = cfqg_print_stat, 1873 }, 1874 { 1875 .name = "sectors", 1876 .private = offsetof(struct cfq_group, stats.sectors), 1877 .read_seq_string = cfqg_print_stat, 1878 }, 1879 { 1880 .name = "io_service_bytes", 1881 .private = offsetof(struct cfq_group, stats.service_bytes), 1882 .read_seq_string = cfqg_print_rwstat, 1883 }, 1884 { 1885 .name = "io_serviced", 1886 .private = offsetof(struct cfq_group, stats.serviced), 1887 .read_seq_string = cfqg_print_rwstat, 1888 }, 1889 { 1890 .name = "io_service_time", 1891 .private = offsetof(struct cfq_group, stats.service_time), 1892 .read_seq_string = cfqg_print_rwstat, 1893 }, 1894 { 1895 .name = "io_wait_time", 1896 .private = offsetof(struct cfq_group, stats.wait_time), 1897 .read_seq_string = cfqg_print_rwstat, 1898 }, 1899 { 1900 .name = "io_merged", 1901 .private = offsetof(struct cfq_group, stats.merged), 1902 .read_seq_string = cfqg_print_rwstat, 1903 }, 1904 { 1905 .name = "io_queued", 1906 .private = offsetof(struct cfq_group, stats.queued), 1907 .read_seq_string = cfqg_print_rwstat, 1908 }, 1909 1910 /* the same statictics which cover the cfqg and its descendants */ 1911 { 1912 .name = "time_recursive", 1913 .private = offsetof(struct cfq_group, stats.time), 1914 .read_seq_string = cfqg_print_stat_recursive, 1915 }, 1916 { 1917 .name = "sectors_recursive", 1918 .private = offsetof(struct cfq_group, stats.sectors), 1919 .read_seq_string = cfqg_print_stat_recursive, 1920 }, 1921 { 1922 .name = "io_service_bytes_recursive", 1923 .private = offsetof(struct cfq_group, stats.service_bytes), 1924 .read_seq_string = cfqg_print_rwstat_recursive, 1925 }, 1926 { 1927 .name = "io_serviced_recursive", 1928 .private = offsetof(struct cfq_group, stats.serviced), 1929 .read_seq_string = cfqg_print_rwstat_recursive, 1930 }, 1931 { 1932 .name = "io_service_time_recursive", 1933 .private = offsetof(struct cfq_group, stats.service_time), 1934 .read_seq_string = cfqg_print_rwstat_recursive, 1935 }, 1936 { 1937 .name = "io_wait_time_recursive", 1938 .private = offsetof(struct cfq_group, stats.wait_time), 1939 .read_seq_string = cfqg_print_rwstat_recursive, 1940 }, 1941 { 1942 .name = 
"io_merged_recursive", 1943 .private = offsetof(struct cfq_group, stats.merged), 1944 .read_seq_string = cfqg_print_rwstat_recursive, 1945 }, 1946 { 1947 .name = "io_queued_recursive", 1948 .private = offsetof(struct cfq_group, stats.queued), 1949 .read_seq_string = cfqg_print_rwstat_recursive, 1950 }, 1951#ifdef CONFIG_DEBUG_BLK_CGROUP 1952 { 1953 .name = "avg_queue_size", 1954 .read_seq_string = cfqg_print_avg_queue_size, 1955 }, 1956 { 1957 .name = "group_wait_time", 1958 .private = offsetof(struct cfq_group, stats.group_wait_time), 1959 .read_seq_string = cfqg_print_stat, 1960 }, 1961 { 1962 .name = "idle_time", 1963 .private = offsetof(struct cfq_group, stats.idle_time), 1964 .read_seq_string = cfqg_print_stat, 1965 }, 1966 { 1967 .name = "empty_time", 1968 .private = offsetof(struct cfq_group, stats.empty_time), 1969 .read_seq_string = cfqg_print_stat, 1970 }, 1971 { 1972 .name = "dequeue", 1973 .private = offsetof(struct cfq_group, stats.dequeue), 1974 .read_seq_string = cfqg_print_stat, 1975 }, 1976 { 1977 .name = "unaccounted_time", 1978 .private = offsetof(struct cfq_group, stats.unaccounted_time), 1979 .read_seq_string = cfqg_print_stat, 1980 }, 1981#endif /* CONFIG_DEBUG_BLK_CGROUP */ 1982 { } /* terminate */ 1983}; 1984#else /* GROUP_IOSCHED */ 1985static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, 1986 struct blkcg *blkcg) 1987{ 1988 return cfqd->root_group; 1989} 1990 1991static inline void 1992cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { 1993 cfqq->cfqg = cfqg; 1994} 1995 1996#endif /* GROUP_IOSCHED */ 1997 1998/* 1999 * The cfqd->service_trees holds all pending cfq_queue's that have 2000 * requests waiting to be processed. It is sorted in the order that 2001 * we will service the queues. 2002 */ 2003static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2004 bool add_front) 2005{ 2006 struct rb_node **p, *parent; 2007 struct cfq_queue *__cfqq; 2008 unsigned long rb_key; 2009 struct cfq_rb_root *st; 2010 int left; 2011 int new_cfqq = 1; 2012 2013 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); 2014 if (cfq_class_idle(cfqq)) { 2015 rb_key = CFQ_IDLE_DELAY; 2016 parent = rb_last(&st->rb); 2017 if (parent && parent != &cfqq->rb_node) { 2018 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 2019 rb_key += __cfqq->rb_key; 2020 } else 2021 rb_key += jiffies; 2022 } else if (!add_front) { 2023 /* 2024 * Get our rb key offset. Subtract any residual slice 2025 * value carried from last service. A negative resid 2026 * count indicates slice overrun, and this should position 2027 * the next service time further away in the tree. 2028 */ 2029 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; 2030 rb_key -= cfqq->slice_resid; 2031 cfqq->slice_resid = 0; 2032 } else { 2033 rb_key = -HZ; 2034 __cfqq = cfq_rb_first(st); 2035 rb_key += __cfqq ? __cfqq->rb_key : jiffies; 2036 } 2037 2038 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 2039 new_cfqq = 0; 2040 /* 2041 * same position, nothing more to do 2042 */ 2043 if (rb_key == cfqq->rb_key && cfqq->service_tree == st) 2044 return; 2045 2046 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); 2047 cfqq->service_tree = NULL; 2048 } 2049 2050 left = 1; 2051 parent = NULL; 2052 cfqq->service_tree = st; 2053 p = &st->rb.rb_node; 2054 while (*p) { 2055 parent = *p; 2056 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 2057 2058 /* 2059 * sort by key, that represents service time. 
2060 */ 2061 if (time_before(rb_key, __cfqq->rb_key)) 2062 p = &parent->rb_left; 2063 else { 2064 p = &parent->rb_right; 2065 left = 0; 2066 } 2067 } 2068 2069 if (left) 2070 st->left = &cfqq->rb_node; 2071 2072 cfqq->rb_key = rb_key; 2073 rb_link_node(&cfqq->rb_node, parent, p); 2074 rb_insert_color(&cfqq->rb_node, &st->rb); 2075 st->count++; 2076 if (add_front || !new_cfqq) 2077 return; 2078 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); 2079} 2080 2081static struct cfq_queue * 2082cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, 2083 sector_t sector, struct rb_node **ret_parent, 2084 struct rb_node ***rb_link) 2085{ 2086 struct rb_node **p, *parent; 2087 struct cfq_queue *cfqq = NULL; 2088 2089 parent = NULL; 2090 p = &root->rb_node; 2091 while (*p) { 2092 struct rb_node **n; 2093 2094 parent = *p; 2095 cfqq = rb_entry(parent, struct cfq_queue, p_node); 2096 2097 /* 2098 * Sort strictly based on sector. Smallest to the left, 2099 * largest to the right. 2100 */ 2101 if (sector > blk_rq_pos(cfqq->next_rq)) 2102 n = &(*p)->rb_right; 2103 else if (sector < blk_rq_pos(cfqq->next_rq)) 2104 n = &(*p)->rb_left; 2105 else 2106 break; 2107 p = n; 2108 cfqq = NULL; 2109 } 2110 2111 *ret_parent = parent; 2112 if (rb_link) 2113 *rb_link = p; 2114 return cfqq; 2115} 2116 2117static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2118{ 2119 struct rb_node **p, *parent; 2120 struct cfq_queue *__cfqq; 2121 2122 if (cfqq->p_root) { 2123 rb_erase(&cfqq->p_node, cfqq->p_root); 2124 cfqq->p_root = NULL; 2125 } 2126 2127 if (cfq_class_idle(cfqq)) 2128 return; 2129 if (!cfqq->next_rq) 2130 return; 2131 2132 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; 2133 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, 2134 blk_rq_pos(cfqq->next_rq), &parent, &p); 2135 if (!__cfqq) { 2136 rb_link_node(&cfqq->p_node, parent, p); 2137 rb_insert_color(&cfqq->p_node, cfqq->p_root); 2138 } else 2139 cfqq->p_root = NULL; 2140} 2141 2142/* 2143 * Update cfqq's position in the service tree. 2144 */ 2145static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2146{ 2147 /* 2148 * Resorting requires the cfqq to be on the RR list already. 2149 */ 2150 if (cfq_cfqq_on_rr(cfqq)) { 2151 cfq_service_tree_add(cfqd, cfqq, 0); 2152 cfq_prio_tree_add(cfqd, cfqq); 2153 } 2154} 2155 2156/* 2157 * add to busy list of queues for service, trying to be fair in ordering 2158 * the pending list according to last request service 2159 */ 2160static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2161{ 2162 cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); 2163 BUG_ON(cfq_cfqq_on_rr(cfqq)); 2164 cfq_mark_cfqq_on_rr(cfqq); 2165 cfqd->busy_queues++; 2166 if (cfq_cfqq_sync(cfqq)) 2167 cfqd->busy_sync_queues++; 2168 2169 cfq_resort_rr_list(cfqd, cfqq); 2170} 2171 2172/* 2173 * Called when the cfqq no longer has requests pending, remove it from 2174 * the service tree. 
2175 */ 2176static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2177{ 2178 cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); 2179 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 2180 cfq_clear_cfqq_on_rr(cfqq); 2181 2182 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 2183 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); 2184 cfqq->service_tree = NULL; 2185 } 2186 if (cfqq->p_root) { 2187 rb_erase(&cfqq->p_node, cfqq->p_root); 2188 cfqq->p_root = NULL; 2189 } 2190 2191 cfq_group_notify_queue_del(cfqd, cfqq->cfqg); 2192 BUG_ON(!cfqd->busy_queues); 2193 cfqd->busy_queues--; 2194 if (cfq_cfqq_sync(cfqq)) 2195 cfqd->busy_sync_queues--; 2196} 2197 2198/* 2199 * rb tree support functions 2200 */ 2201static void cfq_del_rq_rb(struct request *rq) 2202{ 2203 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2204 const int sync = rq_is_sync(rq); 2205 2206 BUG_ON(!cfqq->queued[sync]); 2207 cfqq->queued[sync]--; 2208 2209 elv_rb_del(&cfqq->sort_list, rq); 2210 2211 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) { 2212 /* 2213 * Queue will be deleted from service tree when we actually 2214 * expire it later. Right now just remove it from prio tree 2215 * as it is empty. 2216 */ 2217 if (cfqq->p_root) { 2218 rb_erase(&cfqq->p_node, cfqq->p_root); 2219 cfqq->p_root = NULL; 2220 } 2221 } 2222} 2223 2224static void cfq_add_rq_rb(struct request *rq) 2225{ 2226 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2227 struct cfq_data *cfqd = cfqq->cfqd; 2228 struct request *prev; 2229 2230 cfqq->queued[rq_is_sync(rq)]++; 2231 2232 elv_rb_add(&cfqq->sort_list, rq); 2233 2234 if (!cfq_cfqq_on_rr(cfqq)) 2235 cfq_add_cfqq_rr(cfqd, cfqq); 2236 2237 /* 2238 * check if this request is a better next-serve candidate 2239 */ 2240 prev = cfqq->next_rq; 2241 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); 2242 2243 /* 2244 * adjust priority tree position, if ->next_rq changes 2245 */ 2246 if (prev != cfqq->next_rq) 2247 cfq_prio_tree_add(cfqd, cfqq); 2248 2249 BUG_ON(!cfqq->next_rq); 2250} 2251 2252static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) 2253{ 2254 elv_rb_del(&cfqq->sort_list, rq); 2255 cfqq->queued[rq_is_sync(rq)]--; 2256 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); 2257 cfq_add_rq_rb(rq); 2258 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, 2259 rq->cmd_flags); 2260} 2261 2262static struct request * 2263cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) 2264{ 2265 struct task_struct *tsk = current; 2266 struct cfq_io_cq *cic; 2267 struct cfq_queue *cfqq; 2268 2269 cic = cfq_cic_lookup(cfqd, tsk->io_context); 2270 if (!cic) 2271 return NULL; 2272 2273 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 2274 if (cfqq) { 2275 sector_t sector = bio->bi_sector + bio_sectors(bio); 2276 2277 return elv_rb_find(&cfqq->sort_list, sector); 2278 } 2279 2280 return NULL; 2281} 2282 2283static void cfq_activate_request(struct request_queue *q, struct request *rq) 2284{ 2285 struct cfq_data *cfqd = q->elevator->elevator_data; 2286 2287 cfqd->rq_in_driver++; 2288 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 2289 cfqd->rq_in_driver); 2290 2291 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); 2292} 2293 2294static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 2295{ 2296 struct cfq_data *cfqd = q->elevator->elevator_data; 2297 2298 WARN_ON(!cfqd->rq_in_driver); 2299 cfqd->rq_in_driver--; 2300 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", 2301 cfqd->rq_in_driver); 2302} 2303 2304static void 
cfq_remove_request(struct request *rq) 2305{ 2306 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2307 2308 if (cfqq->next_rq == rq) 2309 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); 2310 2311 list_del_init(&rq->queuelist); 2312 cfq_del_rq_rb(rq); 2313 2314 cfqq->cfqd->rq_queued--; 2315 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); 2316 if (rq->cmd_flags & REQ_PRIO) { 2317 WARN_ON(!cfqq->prio_pending); 2318 cfqq->prio_pending--; 2319 } 2320} 2321 2322static int cfq_merge(struct request_queue *q, struct request **req, 2323 struct bio *bio) 2324{ 2325 struct cfq_data *cfqd = q->elevator->elevator_data; 2326 struct request *__rq; 2327 2328 __rq = cfq_find_rq_fmerge(cfqd, bio); 2329 if (__rq && elv_rq_merge_ok(__rq, bio)) { 2330 *req = __rq; 2331 return ELEVATOR_FRONT_MERGE; 2332 } 2333 2334 return ELEVATOR_NO_MERGE; 2335} 2336 2337static void cfq_merged_request(struct request_queue *q, struct request *req, 2338 int type) 2339{ 2340 if (type == ELEVATOR_FRONT_MERGE) { 2341 struct cfq_queue *cfqq = RQ_CFQQ(req); 2342 2343 cfq_reposition_rq_rb(cfqq, req); 2344 } 2345} 2346 2347static void cfq_bio_merged(struct request_queue *q, struct request *req, 2348 struct bio *bio) 2349{ 2350 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw); 2351} 2352 2353static void 2354cfq_merged_requests(struct request_queue *q, struct request *rq, 2355 struct request *next) 2356{ 2357 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2358 struct cfq_data *cfqd = q->elevator->elevator_data; 2359 2360 /* 2361 * reposition in fifo if next is older than rq 2362 */ 2363 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 2364 time_before(rq_fifo_time(next), rq_fifo_time(rq)) && 2365 cfqq == RQ_CFQQ(next)) { 2366 list_move(&rq->queuelist, &next->queuelist); 2367 rq_set_fifo_time(rq, rq_fifo_time(next)); 2368 } 2369 2370 if (cfqq->next_rq == next) 2371 cfqq->next_rq = rq; 2372 cfq_remove_request(next); 2373 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags); 2374 2375 cfqq = RQ_CFQQ(next); 2376 /* 2377 * all requests of this queue are merged to other queues, delete it 2378 * from the service tree. If it's the active_queue, 2379 * cfq_dispatch_requests() will choose to expire it or do idle 2380 */ 2381 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && 2382 cfqq != cfqd->active_queue) 2383 cfq_del_cfqq_rr(cfqd, cfqq); 2384} 2385 2386static int cfq_allow_merge(struct request_queue *q, struct request *rq, 2387 struct bio *bio) 2388{ 2389 struct cfq_data *cfqd = q->elevator->elevator_data; 2390 struct cfq_io_cq *cic; 2391 struct cfq_queue *cfqq; 2392 2393 /* 2394 * Disallow merge of a sync bio into an async request. 2395 */ 2396 if (cfq_bio_sync(bio) && !rq_is_sync(rq)) 2397 return false; 2398 2399 /* 2400 * Lookup the cfqq that this bio will be queued with and allow 2401 * merge only if rq is queued there. 
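 * (In other words, a bio is only allowed to merge into a request that already belongs to the submitting task's own cfqq; merging across queues would account the I/O to, and dispatch it within, another process's time slice.)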
2402 */ 2403 cic = cfq_cic_lookup(cfqd, current->io_context); 2404 if (!cic) 2405 return false; 2406 2407 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 2408 return cfqq == RQ_CFQQ(rq); 2409} 2410 2411static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2412{ 2413 del_timer(&cfqd->idle_slice_timer); 2414 cfqg_stats_update_idle_time(cfqq->cfqg); 2415} 2416 2417static void __cfq_set_active_queue(struct cfq_data *cfqd, 2418 struct cfq_queue *cfqq) 2419{ 2420 if (cfqq) { 2421 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", 2422 cfqd->serving_wl_class, cfqd->serving_wl_type); 2423 cfqg_stats_update_avg_queue_size(cfqq->cfqg); 2424 cfqq->slice_start = 0; 2425 cfqq->dispatch_start = jiffies; 2426 cfqq->allocated_slice = 0; 2427 cfqq->slice_end = 0; 2428 cfqq->slice_dispatch = 0; 2429 cfqq->nr_sectors = 0; 2430 2431 cfq_clear_cfqq_wait_request(cfqq); 2432 cfq_clear_cfqq_must_dispatch(cfqq); 2433 cfq_clear_cfqq_must_alloc_slice(cfqq); 2434 cfq_clear_cfqq_fifo_expire(cfqq); 2435 cfq_mark_cfqq_slice_new(cfqq); 2436 2437 cfq_del_timer(cfqd, cfqq); 2438 } 2439 2440 cfqd->active_queue = cfqq; 2441} 2442 2443/* 2444 * current cfqq expired its slice (or was too idle), select new one 2445 */ 2446static void 2447__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2448 bool timed_out) 2449{ 2450 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); 2451 2452 if (cfq_cfqq_wait_request(cfqq)) 2453 cfq_del_timer(cfqd, cfqq); 2454 2455 cfq_clear_cfqq_wait_request(cfqq); 2456 cfq_clear_cfqq_wait_busy(cfqq); 2457 2458 /* 2459 * If this cfqq is shared between multiple processes, check to 2460 * make sure that those processes are still issuing I/Os within 2461 * the mean seek distance. If not, it may be time to break the 2462 * queues apart again. 2463 */ 2464 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) 2465 cfq_mark_cfqq_split_coop(cfqq); 2466 2467 /* 2468 * store what was left of this slice, if the queue idled/timed out 2469 */ 2470 if (timed_out) { 2471 if (cfq_cfqq_slice_new(cfqq)) 2472 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); 2473 else 2474 cfqq->slice_resid = cfqq->slice_end - jiffies; 2475 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); 2476 } 2477 2478 cfq_group_served(cfqd, cfqq->cfqg, cfqq); 2479 2480 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) 2481 cfq_del_cfqq_rr(cfqd, cfqq); 2482 2483 cfq_resort_rr_list(cfqd, cfqq); 2484 2485 if (cfqq == cfqd->active_queue) 2486 cfqd->active_queue = NULL; 2487 2488 if (cfqd->active_cic) { 2489 put_io_context(cfqd->active_cic->icq.ioc); 2490 cfqd->active_cic = NULL; 2491 } 2492} 2493 2494static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) 2495{ 2496 struct cfq_queue *cfqq = cfqd->active_queue; 2497 2498 if (cfqq) 2499 __cfq_slice_expired(cfqd, cfqq, timed_out); 2500} 2501 2502/* 2503 * Get next queue for service. Unless we have a queue preemption, 2504 * we'll simply select the first cfqq in the service tree. 
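 * ("First" here means the leftmost node of the tree; cfq_rb_first() returns the cached st->left pointer, so the common-case selection does not have to walk the rbtree.)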
2505 */ 2506static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) 2507{ 2508 struct cfq_rb_root *st = st_for(cfqd->serving_group, 2509 cfqd->serving_wl_class, cfqd->serving_wl_type); 2510 2511 if (!cfqd->rq_queued) 2512 return NULL; 2513 2514 /* There is nothing to dispatch */ 2515 if (!st) 2516 return NULL; 2517 if (RB_EMPTY_ROOT(&st->rb)) 2518 return NULL; 2519 return cfq_rb_first(st); 2520} 2521 2522static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) 2523{ 2524 struct cfq_group *cfqg; 2525 struct cfq_queue *cfqq; 2526 int i, j; 2527 struct cfq_rb_root *st; 2528 2529 if (!cfqd->rq_queued) 2530 return NULL; 2531 2532 cfqg = cfq_get_next_cfqg(cfqd); 2533 if (!cfqg) 2534 return NULL; 2535 2536 for_each_cfqg_st(cfqg, i, j, st) 2537 if ((cfqq = cfq_rb_first(st)) != NULL) 2538 return cfqq; 2539 return NULL; 2540} 2541 2542/* 2543 * Get and set a new active queue for service. 2544 */ 2545static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, 2546 struct cfq_queue *cfqq) 2547{ 2548 if (!cfqq) 2549 cfqq = cfq_get_next_queue(cfqd); 2550 2551 __cfq_set_active_queue(cfqd, cfqq); 2552 return cfqq; 2553} 2554 2555static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, 2556 struct request *rq) 2557{ 2558 if (blk_rq_pos(rq) >= cfqd->last_position) 2559 return blk_rq_pos(rq) - cfqd->last_position; 2560 else 2561 return cfqd->last_position - blk_rq_pos(rq); 2562} 2563 2564static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2565 struct request *rq) 2566{ 2567 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; 2568} 2569 2570static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, 2571 struct cfq_queue *cur_cfqq) 2572{ 2573 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; 2574 struct rb_node *parent, *node; 2575 struct cfq_queue *__cfqq; 2576 sector_t sector = cfqd->last_position; 2577 2578 if (RB_EMPTY_ROOT(root)) 2579 return NULL; 2580 2581 /* 2582 * First, if we find a request starting at the end of the last 2583 * request, choose it. 2584 */ 2585 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); 2586 if (__cfqq) 2587 return __cfqq; 2588 2589 /* 2590 * If the exact sector wasn't found, the parent of the NULL leaf 2591 * will contain the closest sector. 2592 */ 2593 __cfqq = rb_entry(parent, struct cfq_queue, p_node); 2594 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) 2595 return __cfqq; 2596 2597 if (blk_rq_pos(__cfqq->next_rq) < sector) 2598 node = rb_next(&__cfqq->p_node); 2599 else 2600 node = rb_prev(&__cfqq->p_node); 2601 if (!node) 2602 return NULL; 2603 2604 __cfqq = rb_entry(node, struct cfq_queue, p_node); 2605 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) 2606 return __cfqq; 2607 2608 return NULL; 2609} 2610 2611/* 2612 * cfqd - obvious 2613 * cur_cfqq - passed in so that we don't decide that the current queue is 2614 * closely cooperating with itself. 2615 * 2616 * So, basically we're assuming that cur_cfqq has dispatched at least 2617 * one request, and that cfqd->last_position reflects a position on the disk 2618 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid 2619 * assumption.
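 * ("Close" is whatever cfq_rq_close() above accepts: within CFQQ_CLOSE_THR = 8*1024 sectors of cfqd->last_position, i.e. roughly 4MB in either direction with 512-byte sectors.)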
2620 */ 2621static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, 2622 struct cfq_queue *cur_cfqq) 2623{ 2624 struct cfq_queue *cfqq; 2625 2626 if (cfq_class_idle(cur_cfqq)) 2627 return NULL; 2628 if (!cfq_cfqq_sync(cur_cfqq)) 2629 return NULL; 2630 if (CFQQ_SEEKY(cur_cfqq)) 2631 return NULL; 2632 2633 /* 2634 * Don't search priority tree if it's the only queue in the group. 2635 */ 2636 if (cur_cfqq->cfqg->nr_cfqq == 1) 2637 return NULL; 2638 2639 /* 2640 * We should notice if some of the queues are cooperating, eg 2641 * working closely on the same area of the disk. In that case, 2642 * we can group them together and don't waste time idling. 2643 */ 2644 cfqq = cfqq_close(cfqd, cur_cfqq); 2645 if (!cfqq) 2646 return NULL; 2647 2648 /* If new queue belongs to different cfq_group, don't choose it */ 2649 if (cur_cfqq->cfqg != cfqq->cfqg) 2650 return NULL; 2651 2652 /* 2653 * It only makes sense to merge sync queues. 2654 */ 2655 if (!cfq_cfqq_sync(cfqq)) 2656 return NULL; 2657 if (CFQQ_SEEKY(cfqq)) 2658 return NULL; 2659 2660 /* 2661 * Do not merge queues of different priority classes 2662 */ 2663 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq)) 2664 return NULL; 2665 2666 return cfqq; 2667} 2668 2669/* 2670 * Determine whether we should enforce idle window for this queue. 2671 */ 2672 2673static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2674{ 2675 enum wl_class_t wl_class = cfqq_class(cfqq); 2676 struct cfq_rb_root *st = cfqq->service_tree; 2677 2678 BUG_ON(!st); 2679 BUG_ON(!st->count); 2680 2681 if (!cfqd->cfq_slice_idle) 2682 return false; 2683 2684 /* We never do for idle class queues. */ 2685 if (wl_class == IDLE_WORKLOAD) 2686 return false; 2687 2688 /* We do for queues that were marked with idle window flag. */ 2689 if (cfq_cfqq_idle_window(cfqq) && 2690 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) 2691 return true; 2692 2693 /* 2694 * Otherwise, we do only if they are the last ones 2695 * in their service tree. 2696 */ 2697 if (st->count == 1 && cfq_cfqq_sync(cfqq) && 2698 !cfq_io_thinktime_big(cfqd, &st->ttime, false)) 2699 return true; 2700 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count); 2701 return false; 2702} 2703 2704static void cfq_arm_slice_timer(struct cfq_data *cfqd) 2705{ 2706 struct cfq_queue *cfqq = cfqd->active_queue; 2707 struct cfq_io_cq *cic; 2708 unsigned long sl, group_idle = 0; 2709 2710 /* 2711 * SSD device without seek penalty, disable idling. But only do so 2712 * for devices that support queuing, otherwise we still have a problem 2713 * with sync vs async workloads. 2714 */ 2715 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) 2716 return; 2717 2718 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); 2719 WARN_ON(cfq_cfqq_slice_new(cfqq)); 2720 2721 /* 2722 * idle is disabled, either manually or by past process history 2723 */ 2724 if (!cfq_should_idle(cfqd, cfqq)) { 2725 /* no queue idling. Check for group idling */ 2726 if (cfqd->cfq_group_idle) 2727 group_idle = cfqd->cfq_group_idle; 2728 else 2729 return; 2730 } 2731 2732 /* 2733 * still active requests from this queue, don't idle 2734 */ 2735 if (cfqq->dispatched) 2736 return; 2737 2738 /* 2739 * task has exited, don't wait 2740 */ 2741 cic = cfqd->active_cic; 2742 if (!cic || !atomic_read(&cic->icq.ioc->active_ref)) 2743 return; 2744 2745 /* 2746 * If our average think time is larger than the remaining time 2747 * slice, then don't idle. This avoids overrunning the allotted 2748 * time slice. 
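 * (For a rough illustration, assuming HZ=1000 and the default cfq_slice_sync = HZ/10 = 100 jiffies: a queue with 5 jiffies of slice left and a ttime_mean of 8 jiffies is not worth idling for, since its next request would probably arrive only after the slice is gone.)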
2749 */ 2750 if (sample_valid(cic->ttime.ttime_samples) && 2751 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) { 2752 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu", 2753 cic->ttime.ttime_mean); 2754 return; 2755 } 2756 2757 /* There are other queues in the group, don't do group idle */ 2758 if (group_idle && cfqq->cfqg->nr_cfqq > 1) 2759 return; 2760 2761 cfq_mark_cfqq_wait_request(cfqq); 2762 2763 if (group_idle) 2764 sl = cfqd->cfq_group_idle; 2765 else 2766 sl = cfqd->cfq_slice_idle; 2767 2768 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 2769 cfqg_stats_set_start_idle_time(cfqq->cfqg); 2770 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, 2771 group_idle ? 1 : 0); 2772} 2773 2774/* 2775 * Move request from internal lists to the request queue dispatch list. 2776 */ 2777static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) 2778{ 2779 struct cfq_data *cfqd = q->elevator->elevator_data; 2780 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2781 2782 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); 2783 2784 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); 2785 cfq_remove_request(rq); 2786 cfqq->dispatched++; 2787 (RQ_CFQG(rq))->dispatched++; 2788 elv_dispatch_sort(q, rq); 2789 2790 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; 2791 cfqq->nr_sectors += blk_rq_sectors(rq); 2792 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags); 2793} 2794 2795/* 2796 * return expired entry, or NULL to just start from scratch in rbtree 2797 */ 2798static struct request *cfq_check_fifo(struct cfq_queue *cfqq) 2799{ 2800 struct request *rq = NULL; 2801 2802 if (cfq_cfqq_fifo_expire(cfqq)) 2803 return NULL; 2804 2805 cfq_mark_cfqq_fifo_expire(cfqq); 2806 2807 if (list_empty(&cfqq->fifo)) 2808 return NULL; 2809 2810 rq = rq_entry_fifo(cfqq->fifo.next); 2811 if (time_before(jiffies, rq_fifo_time(rq))) 2812 rq = NULL; 2813 2814 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); 2815 return rq; 2816} 2817 2818static inline int 2819cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2820{ 2821 const int base_rq = cfqd->cfq_slice_async_rq; 2822 2823 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 2824 2825 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio); 2826} 2827 2828/* 2829 * Must be called with the queue_lock held. 2830 */ 2831static int cfqq_process_refs(struct cfq_queue *cfqq) 2832{ 2833 int process_refs, io_refs; 2834 2835 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; 2836 process_refs = cfqq->ref - io_refs; 2837 BUG_ON(process_refs < 0); 2838 return process_refs; 2839} 2840 2841static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) 2842{ 2843 int process_refs, new_process_refs; 2844 struct cfq_queue *__cfqq; 2845 2846 /* 2847 * If there are no process references on the new_cfqq, then it is 2848 * unsafe to follow the ->new_cfqq chain as other cfqq's in the 2849 * chain may have dropped their last reference (not just their 2850 * last process reference). 2851 */ 2852 if (!cfqq_process_refs(new_cfqq)) 2853 return; 2854 2855 /* Avoid a circular list and skip interim queue merges */ 2856 while ((__cfqq = new_cfqq->new_cfqq)) { 2857 if (__cfqq == cfqq) 2858 return; 2859 new_cfqq = __cfqq; 2860 } 2861 2862 process_refs = cfqq_process_refs(cfqq); 2863 new_process_refs = cfqq_process_refs(new_cfqq); 2864 /* 2865 * If the process for the cfqq has gone away, there is no 2866 * sense in merging the queues. 
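 * (cfqq_process_refs() above subtracts the references held by allocated requests from cfqq->ref, so a zero result really means no task remains behind the queue.)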
2867 */ 2868 if (process_refs == 0 || new_process_refs == 0) 2869 return; 2870 2871 /* 2872 * Merge in the direction of the lesser amount of work. 2873 */ 2874 if (new_process_refs >= process_refs) { 2875 cfqq->new_cfqq = new_cfqq; 2876 new_cfqq->ref += process_refs; 2877 } else { 2878 new_cfqq->new_cfqq = cfqq; 2879 cfqq->ref += new_process_refs; 2880 } 2881} 2882 2883static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd, 2884 struct cfq_group *cfqg, enum wl_class_t wl_class) 2885{ 2886 struct cfq_queue *queue; 2887 int i; 2888 bool key_valid = false; 2889 unsigned long lowest_key = 0; 2890 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; 2891 2892 for (i = 0; i <= SYNC_WORKLOAD; ++i) { 2893 /* select the one with lowest rb_key */ 2894 queue = cfq_rb_first(st_for(cfqg, wl_class, i)); 2895 if (queue && 2896 (!key_valid || time_before(queue->rb_key, lowest_key))) { 2897 lowest_key = queue->rb_key; 2898 cur_best = i; 2899 key_valid = true; 2900 } 2901 } 2902 2903 return cur_best; 2904} 2905 2906static void 2907choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) 2908{ 2909 unsigned slice; 2910 unsigned count; 2911 struct cfq_rb_root *st; 2912 unsigned group_slice; 2913 enum wl_class_t original_class = cfqd->serving_wl_class; 2914 2915 /* Choose next priority. RT > BE > IDLE */ 2916 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) 2917 cfqd->serving_wl_class = RT_WORKLOAD; 2918 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) 2919 cfqd->serving_wl_class = BE_WORKLOAD; 2920 else { 2921 cfqd->serving_wl_class = IDLE_WORKLOAD; 2922 cfqd->workload_expires = jiffies + 1; 2923 return; 2924 } 2925 2926 if (original_class != cfqd->serving_wl_class) 2927 goto new_workload; 2928 2929 /* 2930 * For RT and BE, we also have to choose the type 2931 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload 2932 * expiration time 2933 */ 2934 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); 2935 count = st->count; 2936 2937 /* 2938 * check workload expiration, and that we still have other queues ready 2939 */ 2940 if (count && !time_after(jiffies, cfqd->workload_expires)) 2941 return; 2942 2943new_workload: 2944 /* otherwise select new workload type */ 2945 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, 2946 cfqd->serving_wl_class); 2947 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); 2948 count = st->count; 2949 2950 /* 2951 * the workload slice is computed as a fraction of target latency 2952 * proportional to the number of queues in that workload, over 2953 * all the queues in the same priority class 2954 */ 2955 group_slice = cfq_group_slice(cfqd, cfqg); 2956 2957 slice = group_slice * count / 2958 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], 2959 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, 2960 cfqg)); 2961 2962 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { 2963 unsigned int tmp; 2964 2965 /* 2966 * Async queues are currently system wide. Just taking the 2967 * proportion of queues within the same group will lead to a higher 2968 * async ratio system wide as generally the root group is going 2969 * to have higher weight. A more accurate thing would be to 2970 * calculate the system wide async/sync ratio. 2971 */ 2972 tmp = cfqd->cfq_target_latency * 2973 cfqg_busy_async_queues(cfqd, cfqg); 2974 tmp = tmp/cfqd->busy_queues; 2975 slice = min_t(unsigned, slice, tmp); 2976 2977 /* async workload slice is scaled down according to 2978 * the sync/async slice ratio.
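 * With the default tunables above (and assuming HZ=1000) this ratio is cfq_slice[0]/cfq_slice[1] = cfq_slice_async/cfq_slice_sync = 40/100, i.e. an async workload gets at most 40% of the slice an equally sized sync workload would get.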
*/ 2979 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; 2980 } else 2981 /* sync workload slice is at least 2 * cfq_slice_idle */ 2982 slice = max(slice, 2 * cfqd->cfq_slice_idle); 2983 2984 slice = max_t(unsigned, slice, CFQ_MIN_TT); 2985 cfq_log(cfqd, "workload slice:%d", slice); 2986 cfqd->workload_expires = jiffies + slice; 2987} 2988 2989static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) 2990{ 2991 struct cfq_rb_root *st = &cfqd->grp_service_tree; 2992 struct cfq_group *cfqg; 2993 2994 if (RB_EMPTY_ROOT(&st->rb)) 2995 return NULL; 2996 cfqg = cfq_rb_first_group(st); 2997 update_min_vdisktime(st); 2998 return cfqg; 2999} 3000 3001static void cfq_choose_cfqg(struct cfq_data *cfqd) 3002{ 3003 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); 3004 3005 cfqd->serving_group = cfqg; 3006 3007 /* Restore the workload type data */ 3008 if (cfqg->saved_wl_slice) { 3009 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice; 3010 cfqd->serving_wl_type = cfqg->saved_wl_type; 3011 cfqd->serving_wl_class = cfqg->saved_wl_class; 3012 } else 3013 cfqd->workload_expires = jiffies - 1; 3014 3015 choose_wl_class_and_type(cfqd, cfqg); 3016} 3017 3018/* 3019 * Select a queue for service. If we have a current active queue, 3020 * check whether to continue servicing it, or retrieve and set a new one. 3021 */ 3022static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) 3023{ 3024 struct cfq_queue *cfqq, *new_cfqq = NULL; 3025 3026 cfqq = cfqd->active_queue; 3027 if (!cfqq) 3028 goto new_queue; 3029 3030 if (!cfqd->rq_queued) 3031 return NULL; 3032 3033 /* 3034 * We were waiting for group to get backlogged. Expire the queue 3035 */ 3036 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list)) 3037 goto expire; 3038 3039 /* 3040 * The active queue has run out of time, expire it and select new. 3041 */ 3042 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) { 3043 /* 3044 * If slice had not expired at the completion of last request 3045 * we might not have turned on wait_busy flag. Don't expire 3046 * the queue yet. Allow the group to get backlogged. 3047 * 3048 * The very fact that we have used the slice, that means we 3049 * have been idling all along on this queue and it should be 3050 * ok to wait for this request to complete. 3051 */ 3052 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) 3053 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { 3054 cfqq = NULL; 3055 goto keep_queue; 3056 } else 3057 goto check_group_idle; 3058 } 3059 3060 /* 3061 * The active queue has requests and isn't expired, allow it to 3062 * dispatch. 3063 */ 3064 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 3065 goto keep_queue; 3066 3067 /* 3068 * If another queue has a request waiting within our mean seek 3069 * distance, let it run. The expire code will check for close 3070 * cooperators and put the close queue at the front of the service 3071 * tree. If possible, merge the expiring queue with the new cfqq. 3072 */ 3073 new_cfqq = cfq_close_cooperator(cfqd, cfqq); 3074 if (new_cfqq) { 3075 if (!cfqq->new_cfqq) 3076 cfq_setup_merge(cfqq, new_cfqq); 3077 goto expire; 3078 } 3079 3080 /* 3081 * No requests pending. If the active queue still has requests in 3082 * flight or is idling for a new request, allow either of these 3083 * conditions to happen (or time out) before selecting a new queue. 
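 * (Concretely: while the idle_slice_timer is still pending we keep the now-empty queue as the active one and return NULL, so nothing else gets dispatched while we wait.)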
3084 */ 3085 if (timer_pending(&cfqd->idle_slice_timer)) { 3086 cfqq = NULL; 3087 goto keep_queue; 3088 } 3089 3090 /* 3091 * This is a deep seek queue, but the device is much faster than 3092 * the queue can deliver, don't idle 3093 **/ 3094 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) && 3095 (cfq_cfqq_slice_new(cfqq) || 3096 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) { 3097 cfq_clear_cfqq_deep(cfqq); 3098 cfq_clear_cfqq_idle_window(cfqq); 3099 } 3100 3101 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { 3102 cfqq = NULL; 3103 goto keep_queue; 3104 } 3105 3106 /* 3107 * If group idle is enabled and there are requests dispatched from 3108 * this group, wait for requests to complete. 3109 */ 3110check_group_idle: 3111 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && 3112 cfqq->cfqg->dispatched && 3113 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { 3114 cfqq = NULL; 3115 goto keep_queue; 3116 } 3117 3118expire: 3119 cfq_slice_expired(cfqd, 0); 3120new_queue: 3121 /* 3122 * Current queue expired. Check if we have to switch to a new 3123 * service tree 3124 */ 3125 if (!new_cfqq) 3126 cfq_choose_cfqg(cfqd); 3127 3128 cfqq = cfq_set_active_queue(cfqd, new_cfqq); 3129keep_queue: 3130 return cfqq; 3131} 3132 3133static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) 3134{ 3135 int dispatched = 0; 3136 3137 while (cfqq->next_rq) { 3138 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); 3139 dispatched++; 3140 } 3141 3142 BUG_ON(!list_empty(&cfqq->fifo)); 3143 3144 /* By default cfqq is not expired if it is empty. Do it explicitly */ 3145 __cfq_slice_expired(cfqq->cfqd, cfqq, 0); 3146 return dispatched; 3147} 3148 3149/* 3150 * Drain our current requests. Used for barriers and when switching 3151 * io schedulers on-the-fly. 3152 */ 3153static int cfq_forced_dispatch(struct cfq_data *cfqd) 3154{ 3155 struct cfq_queue *cfqq; 3156 int dispatched = 0; 3157 3158 /* Expire the timeslice of the current active queue first */ 3159 cfq_slice_expired(cfqd, 0); 3160 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { 3161 __cfq_set_active_queue(cfqd, cfqq); 3162 dispatched += __cfq_forced_dispatch_cfqq(cfqq); 3163 } 3164 3165 BUG_ON(cfqd->busy_queues); 3166 3167 cfq_log(cfqd, "forced_dispatch=%d", dispatched); 3168 return dispatched; 3169} 3170 3171static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, 3172 struct cfq_queue *cfqq) 3173{ 3174 /* the queue hasn't finished any request, can't estimate */ 3175 if (cfq_cfqq_slice_new(cfqq)) 3176 return true; 3177 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, 3178 cfqq->slice_end)) 3179 return true; 3180 3181 return false; 3182} 3183 3184static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3185{ 3186 unsigned int max_dispatch; 3187 3188 /* 3189 * Drain async requests before we start sync IO 3190 */ 3191 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC]) 3192 return false; 3193 3194 /* 3195 * If this is an async queue and we have sync IO in flight, let it wait 3196 */ 3197 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)) 3198 return false; 3199 3200 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1); 3201 if (cfq_class_idle(cfqq)) 3202 max_dispatch = 1; 3203 3204 /* 3205 * Does this cfqq already have too much IO in flight? 
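 * (With the default cfq_quantum = 8 the throttle point is normally cfq_quantum/2 = 4 requests; the checks below drop the limit entirely for a sole busy queue and otherwise allow ramping up to the full cfq_quantum early in the slice.)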
3206 */ 3207 if (cfqq->dispatched >= max_dispatch) { 3208 bool promote_sync = false; 3209 /* 3210 * idle queue must always only have a single IO in flight 3211 */ 3212 if (cfq_class_idle(cfqq)) 3213 return false; 3214 3215 /* 3216 * If there is only one sync queue 3217 * we can ignore async queue here and give the sync 3218 * queue no dispatch limit. The reason is a sync queue can 3219 * preempt async queue, limiting the sync queue doesn't make 3220 * sense. This is useful for aiostress test. 3221 */ 3222 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) 3223 promote_sync = true; 3224 3225 /* 3226 * We have other queues, don't allow more IO from this one 3227 */ 3228 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) && 3229 !promote_sync) 3230 return false; 3231 3232 /* 3233 * Sole queue user, no limit 3234 */ 3235 if (cfqd->busy_queues == 1 || promote_sync) 3236 max_dispatch = -1; 3237 else 3238 /* 3239 * Normally we start throttling cfqq when cfq_quantum/2 3240 * requests have been dispatched. But we can drive 3241 * deeper queue depths at the beginning of slice 3242 * subjected to upper limit of cfq_quantum. 3243 * */ 3244 max_dispatch = cfqd->cfq_quantum; 3245 } 3246 3247 /* 3248 * Async queues must wait a bit before being allowed dispatch. 3249 * We also ramp up the dispatch depth gradually for async IO, 3250 * based on the last sync IO we serviced 3251 */ 3252 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { 3253 unsigned long last_sync = jiffies - cfqd->last_delayed_sync; 3254 unsigned int depth; 3255 3256 depth = last_sync / cfqd->cfq_slice[1]; 3257 if (!depth && !cfqq->dispatched) 3258 depth = 1; 3259 if (depth < max_dispatch) 3260 max_dispatch = depth; 3261 } 3262 3263 /* 3264 * If we're below the current max, allow a dispatch 3265 */ 3266 return cfqq->dispatched < max_dispatch; 3267} 3268 3269/* 3270 * Dispatch a request from cfqq, moving them to the request queue 3271 * dispatch list. 3272 */ 3273static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3274{ 3275 struct request *rq; 3276 3277 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); 3278 3279 if (!cfq_may_dispatch(cfqd, cfqq)) 3280 return false; 3281 3282 /* 3283 * follow expired path, else get first next available 3284 */ 3285 rq = cfq_check_fifo(cfqq); 3286 if (!rq) 3287 rq = cfqq->next_rq; 3288 3289 /* 3290 * insert request into driver dispatch list 3291 */ 3292 cfq_dispatch_insert(cfqd->queue, rq); 3293 3294 if (!cfqd->active_cic) { 3295 struct cfq_io_cq *cic = RQ_CIC(rq); 3296 3297 atomic_long_inc(&cic->icq.ioc->refcount); 3298 cfqd->active_cic = cic; 3299 } 3300 3301 return true; 3302} 3303 3304/* 3305 * Find the cfqq that we need to service and move a request from that to the 3306 * dispatch list 3307 */ 3308static int cfq_dispatch_requests(struct request_queue *q, int force) 3309{ 3310 struct cfq_data *cfqd = q->elevator->elevator_data; 3311 struct cfq_queue *cfqq; 3312 3313 if (!cfqd->busy_queues) 3314 return 0; 3315 3316 if (unlikely(force)) 3317 return cfq_forced_dispatch(cfqd); 3318 3319 cfqq = cfq_select_queue(cfqd); 3320 if (!cfqq) 3321 return 0; 3322 3323 /* 3324 * Dispatch a request from this cfqq, if it is allowed 3325 */ 3326 if (!cfq_dispatch_request(cfqd, cfqq)) 3327 return 0; 3328 3329 cfqq->slice_dispatch++; 3330 cfq_clear_cfqq_must_dispatch(cfqq); 3331 3332 /* 3333 * expire an async queue immediately if it has used up its slice. idle 3334 * queue always expire after 1 dispatch round. 
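 * (For an async queue, "used up its slice" is counted in dispatches via cfq_prio_to_maxrq(): 2 * cfq_slice_async_rq * (IOPRIO_BE_NR - ioprio); e.g. with the default cfq_slice_async_rq = 2 and a normal priority of 4, assuming IOPRIO_BE_NR is 8, that is 16 requests.)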
3335 */ 3336 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && 3337 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || 3338 cfq_class_idle(cfqq))) { 3339 cfqq->slice_end = jiffies + 1; 3340 cfq_slice_expired(cfqd, 0); 3341 } 3342 3343 cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); 3344 return 1; 3345} 3346 3347/* 3348 * task holds one reference to the queue, dropped when task exits. each rq 3349 * in-flight on this queue also holds a reference, dropped when rq is freed. 3350 * 3351 * Each cfq queue took a reference on the parent group. Drop it now. 3352 * queue lock must be held here. 3353 */ 3354static void cfq_put_queue(struct cfq_queue *cfqq) 3355{ 3356 struct cfq_data *cfqd = cfqq->cfqd; 3357 struct cfq_group *cfqg; 3358 3359 BUG_ON(cfqq->ref <= 0); 3360 3361 cfqq->ref--; 3362 if (cfqq->ref) 3363 return; 3364 3365 cfq_log_cfqq(cfqd, cfqq, "put_queue"); 3366 BUG_ON(rb_first(&cfqq->sort_list)); 3367 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); 3368 cfqg = cfqq->cfqg; 3369 3370 if (unlikely(cfqd->active_queue == cfqq)) { 3371 __cfq_slice_expired(cfqd, cfqq, 0); 3372 cfq_schedule_dispatch(cfqd); 3373 } 3374 3375 BUG_ON(cfq_cfqq_on_rr(cfqq)); 3376 kmem_cache_free(cfq_pool, cfqq); 3377 cfqg_put(cfqg); 3378} 3379 3380static void cfq_put_cooperator(struct cfq_queue *cfqq) 3381{ 3382 struct cfq_queue *__cfqq, *next; 3383 3384 /* 3385 * If this queue was scheduled to merge with another queue, be 3386 * sure to drop the reference taken on that queue (and others in 3387 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs. 3388 */ 3389 __cfqq = cfqq->new_cfqq; 3390 while (__cfqq) { 3391 if (__cfqq == cfqq) { 3392 WARN(1, "cfqq->new_cfqq loop detected\n"); 3393 break; 3394 } 3395 next = __cfqq->new_cfqq; 3396 cfq_put_queue(__cfqq); 3397 __cfqq = next; 3398 } 3399} 3400 3401static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3402{ 3403 if (unlikely(cfqq == cfqd->active_queue)) { 3404 __cfq_slice_expired(cfqd, cfqq, 0); 3405 cfq_schedule_dispatch(cfqd); 3406 } 3407 3408 cfq_put_cooperator(cfqq); 3409 3410 cfq_put_queue(cfqq); 3411} 3412 3413static void cfq_init_icq(struct io_cq *icq) 3414{ 3415 struct cfq_io_cq *cic = icq_to_cic(icq); 3416 3417 cic->ttime.last_end_request = jiffies; 3418} 3419 3420static void cfq_exit_icq(struct io_cq *icq) 3421{ 3422 struct cfq_io_cq *cic = icq_to_cic(icq); 3423 struct cfq_data *cfqd = cic_to_cfqd(cic); 3424 3425 if (cic->cfqq[BLK_RW_ASYNC]) { 3426 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); 3427 cic->cfqq[BLK_RW_ASYNC] = NULL; 3428 } 3429 3430 if (cic->cfqq[BLK_RW_SYNC]) { 3431 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); 3432 cic->cfqq[BLK_RW_SYNC] = NULL; 3433 } 3434} 3435 3436static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic) 3437{ 3438 struct task_struct *tsk = current; 3439 int ioprio_class; 3440 3441 if (!cfq_cfqq_prio_changed(cfqq)) 3442 return; 3443 3444 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); 3445 switch (ioprio_class) { 3446 default: 3447 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 3448 case IOPRIO_CLASS_NONE: 3449 /* 3450 * no prio set, inherit CPU scheduling settings 3451 */ 3452 cfqq->ioprio = task_nice_ioprio(tsk); 3453 cfqq->ioprio_class = task_nice_ioclass(tsk); 3454 break; 3455 case IOPRIO_CLASS_RT: 3456 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio); 3457 cfqq->ioprio_class = IOPRIO_CLASS_RT; 3458 break; 3459 case IOPRIO_CLASS_BE: 3460 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio); 3461 cfqq->ioprio_class = IOPRIO_CLASS_BE; 3462 break; 3463 case 
IOPRIO_CLASS_IDLE: 3464 cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 3465 cfqq->ioprio = 7; 3466 cfq_clear_cfqq_idle_window(cfqq); 3467 break; 3468 } 3469 3470 /* 3471 * keep track of original prio settings in case we have to temporarily 3472 * elevate the priority of this queue 3473 */ 3474 cfqq->org_ioprio = cfqq->ioprio; 3475 cfq_clear_cfqq_prio_changed(cfqq); 3476} 3477 3478static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio) 3479{ 3480 int ioprio = cic->icq.ioc->ioprio; 3481 struct cfq_data *cfqd = cic_to_cfqd(cic); 3482 struct cfq_queue *cfqq; 3483 3484 /* 3485 * Check whether ioprio has changed. The condition may trigger 3486 * spuriously on a newly created cic but there's no harm. 3487 */ 3488 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio)) 3489 return; 3490 3491 cfqq = cic->cfqq[BLK_RW_ASYNC]; 3492 if (cfqq) { 3493 struct cfq_queue *new_cfqq; 3494 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, 3495 GFP_ATOMIC); 3496 if (new_cfqq) { 3497 cic->cfqq[BLK_RW_ASYNC] = new_cfqq; 3498 cfq_put_queue(cfqq); 3499 } 3500 } 3501 3502 cfqq = cic->cfqq[BLK_RW_SYNC]; 3503 if (cfqq) 3504 cfq_mark_cfqq_prio_changed(cfqq); 3505 3506 cic->ioprio = ioprio; 3507} 3508 3509static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3510 pid_t pid, bool is_sync) 3511{ 3512 RB_CLEAR_NODE(&cfqq->rb_node); 3513 RB_CLEAR_NODE(&cfqq->p_node); 3514 INIT_LIST_HEAD(&cfqq->fifo); 3515 3516 cfqq->ref = 0; 3517 cfqq->cfqd = cfqd; 3518 3519 cfq_mark_cfqq_prio_changed(cfqq); 3520 3521 if (is_sync) { 3522 if (!cfq_class_idle(cfqq)) 3523 cfq_mark_cfqq_idle_window(cfqq); 3524 cfq_mark_cfqq_sync(cfqq); 3525 } 3526 cfqq->pid = pid; 3527} 3528 3529#ifdef CONFIG_CFQ_GROUP_IOSCHED 3530static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) 3531{ 3532 struct cfq_data *cfqd = cic_to_cfqd(cic); 3533 struct cfq_queue *sync_cfqq; 3534 uint64_t id; 3535 3536 rcu_read_lock(); 3537 id = bio_blkcg(bio)->id; 3538 rcu_read_unlock(); 3539 3540 /* 3541 * Check whether blkcg has changed. The condition may trigger 3542 * spuriously on a newly created cic but there's no harm. 3543 */ 3544 if (unlikely(!cfqd) || likely(cic->blkcg_id == id)) 3545 return; 3546 3547 sync_cfqq = cic_to_cfqq(cic, 1); 3548 if (sync_cfqq) { 3549 /* 3550 * Drop reference to sync queue. A new sync queue will be 3551 * assigned in new group upon arrival of a fresh request. 3552 */ 3553 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup"); 3554 cic_set_cfqq(cic, NULL, 1); 3555 cfq_put_queue(sync_cfqq); 3556 } 3557 3558 cic->blkcg_id = id; 3559} 3560#else 3561static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { } 3562#endif /* CONFIG_CFQ_GROUP_IOSCHED */ 3563 3564static struct cfq_queue * 3565cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, 3566 struct bio *bio, gfp_t gfp_mask) 3567{ 3568 struct blkcg *blkcg; 3569 struct cfq_queue *cfqq, *new_cfqq = NULL; 3570 struct cfq_group *cfqg; 3571 3572retry: 3573 rcu_read_lock(); 3574 3575 blkcg = bio_blkcg(bio); 3576 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); 3577 cfqq = cic_to_cfqq(cic, is_sync); 3578 3579 /* 3580 * Always try a new alloc if we fell back to the OOM cfqq 3581 * originally, since it should just be a temporary situation. 
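 * (Note the retry below: for a __GFP_WAIT allocation the queue_lock is dropped around kmem_cache_alloc_node() and the lookup is redone from the "retry" label, since another task may have set the cfqq up in the meantime.)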
3582 */ 3583 if (!cfqq || cfqq == &cfqd->oom_cfqq) { 3584 cfqq = NULL; 3585 if (new_cfqq) { 3586 cfqq = new_cfqq; 3587 new_cfqq = NULL; 3588 } else if (gfp_mask & __GFP_WAIT) { 3589 rcu_read_unlock(); 3590 spin_unlock_irq(cfqd->queue->queue_lock); 3591 new_cfqq = kmem_cache_alloc_node(cfq_pool, 3592 gfp_mask | __GFP_ZERO, 3593 cfqd->queue->node); 3594 spin_lock_irq(cfqd->queue->queue_lock); 3595 if (new_cfqq) 3596 goto retry; 3597 else 3598 return &cfqd->oom_cfqq; 3599 } else { 3600 cfqq = kmem_cache_alloc_node(cfq_pool, 3601 gfp_mask | __GFP_ZERO, 3602 cfqd->queue->node); 3603 } 3604 3605 if (cfqq) { 3606 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); 3607 cfq_init_prio_data(cfqq, cic); 3608 cfq_link_cfqq_cfqg(cfqq, cfqg); 3609 cfq_log_cfqq(cfqd, cfqq, "alloced"); 3610 } else 3611 cfqq = &cfqd->oom_cfqq; 3612 } 3613 3614 if (new_cfqq) 3615 kmem_cache_free(cfq_pool, new_cfqq); 3616 3617 rcu_read_unlock(); 3618 return cfqq; 3619} 3620 3621static struct cfq_queue ** 3622cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) 3623{ 3624 switch (ioprio_class) { 3625 case IOPRIO_CLASS_RT: 3626 return &cfqd->async_cfqq[0][ioprio]; 3627 case IOPRIO_CLASS_NONE: 3628 ioprio = IOPRIO_NORM; 3629 /* fall through */ 3630 case IOPRIO_CLASS_BE: 3631 return &cfqd->async_cfqq[1][ioprio]; 3632 case IOPRIO_CLASS_IDLE: 3633 return &cfqd->async_idle_cfqq; 3634 default: 3635 BUG(); 3636 } 3637} 3638 3639static struct cfq_queue * 3640cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, 3641 struct bio *bio, gfp_t gfp_mask) 3642{ 3643 const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); 3644 const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); 3645 struct cfq_queue **async_cfqq = NULL; 3646 struct cfq_queue *cfqq = NULL; 3647 3648 if (!is_sync) { 3649 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); 3650 cfqq = *async_cfqq; 3651 } 3652 3653 if (!cfqq) 3654 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask); 3655 3656 /* 3657 * pin the queue now that it's allocated, scheduler exit will prune it 3658 */ 3659 if (!is_sync && !(*async_cfqq)) { 3660 cfqq->ref++; 3661 *async_cfqq = cfqq; 3662 } 3663 3664 cfqq->ref++; 3665 return cfqq; 3666} 3667 3668static void 3669__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle) 3670{ 3671 unsigned long elapsed = jiffies - ttime->last_end_request; 3672 elapsed = min(elapsed, 2UL * slice_idle); 3673 3674 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8; 3675 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8; 3676 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples; 3677} 3678 3679static void 3680cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3681 struct cfq_io_cq *cic) 3682{ 3683 if (cfq_cfqq_sync(cfqq)) { 3684 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle); 3685 __cfq_update_io_thinktime(&cfqq->service_tree->ttime, 3686 cfqd->cfq_slice_idle); 3687 } 3688#ifdef CONFIG_CFQ_GROUP_IOSCHED 3689 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); 3690#endif 3691} 3692 3693static void 3694cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3695 struct request *rq) 3696{ 3697 sector_t sdist = 0; 3698 sector_t n_sec = blk_rq_sectors(rq); 3699 if (cfqq->last_request_pos) { 3700 if (cfqq->last_request_pos < blk_rq_pos(rq)) 3701 sdist = blk_rq_pos(rq) - cfqq->last_request_pos; 3702 else 3703 sdist = cfqq->last_request_pos - blk_rq_pos(rq); 3704 } 3705 3706 cfqq->seek_history <<= 1; 3707 if 
(blk_queue_nonrot(cfqd->queue)) 3708 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT); 3709 else 3710 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR); 3711} 3712 3713/* 3714 * Disable idle window if the process thinks too long or seeks so much that 3715 * it doesn't matter 3716 */ 3717static void 3718cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3719 struct cfq_io_cq *cic) 3720{ 3721 int old_idle, enable_idle; 3722 3723 /* 3724 * Don't idle for async or idle io prio class 3725 */ 3726 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq)) 3727 return; 3728 3729 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); 3730 3731 if (cfqq->queued[0] + cfqq->queued[1] >= 4) 3732 cfq_mark_cfqq_deep(cfqq); 3733 3734 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE)) 3735 enable_idle = 0; 3736 else if (!atomic_read(&cic->icq.ioc->active_ref) || 3737 !cfqd->cfq_slice_idle || 3738 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq))) 3739 enable_idle = 0; 3740 else if (sample_valid(cic->ttime.ttime_samples)) { 3741 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle) 3742 enable_idle = 0; 3743 else 3744 enable_idle = 1; 3745 } 3746 3747 if (old_idle != enable_idle) { 3748 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); 3749 if (enable_idle) 3750 cfq_mark_cfqq_idle_window(cfqq); 3751 else 3752 cfq_clear_cfqq_idle_window(cfqq); 3753 } 3754} 3755 3756/* 3757 * Check if new_cfqq should preempt the currently active queue. Return 0 for 3758 * no or if we aren't sure, a 1 will cause a preempt. 3759 */ 3760static bool 3761cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, 3762 struct request *rq) 3763{ 3764 struct cfq_queue *cfqq; 3765 3766 cfqq = cfqd->active_queue; 3767 if (!cfqq) 3768 return false; 3769 3770 if (cfq_class_idle(new_cfqq)) 3771 return false; 3772 3773 if (cfq_class_idle(cfqq)) 3774 return true; 3775 3776 /* 3777 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice. 3778 */ 3779 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq)) 3780 return false; 3781 3782 /* 3783 * if the new request is sync, but the currently running queue is 3784 * not, let the sync request have priority. 3785 */ 3786 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) 3787 return true; 3788 3789 if (new_cfqq->cfqg != cfqq->cfqg) 3790 return false; 3791 3792 if (cfq_slice_used(cfqq)) 3793 return true; 3794 3795 /* Allow preemption only if we are idling on sync-noidle tree */ 3796 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD && 3797 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && 3798 new_cfqq->service_tree->count == 2 && 3799 RB_EMPTY_ROOT(&cfqq->sort_list)) 3800 return true; 3801 3802 /* 3803 * So both queues are sync. Let the new request get disk time if 3804 * it's a metadata request and the current queue is doing regular IO. 3805 */ 3806 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending) 3807 return true; 3808 3809 /* 3810 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. 
3811 */ 3812 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) 3813 return true; 3814 3815 /* An idle queue should not be idle now for some reason */ 3816 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq)) 3817 return true; 3818 3819 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) 3820 return false; 3821 3822 /* 3823 * if this request is as-good as one we would expect from the 3824 * current cfqq, let it preempt 3825 */ 3826 if (cfq_rq_close(cfqd, cfqq, rq)) 3827 return true; 3828 3829 return false; 3830} 3831 3832/* 3833 * cfqq preempts the active queue. if we allowed preempt with no slice left, 3834 * let it have half of its nominal slice. 3835 */ 3836static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3837{ 3838 enum wl_type_t old_type = cfqq_type(cfqd->active_queue); 3839 3840 cfq_log_cfqq(cfqd, cfqq, "preempt"); 3841 cfq_slice_expired(cfqd, 1); 3842 3843 /* 3844 * workload type is changed, don't save slice, otherwise preempt 3845 * doesn't happen 3846 */ 3847 if (old_type != cfqq_type(cfqq)) 3848 cfqq->cfqg->saved_wl_slice = 0; 3849 3850 /* 3851 * Put the new queue at the front of the current list, 3852 * so we know that it will be selected next. 3853 */ 3854 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 3855 3856 cfq_service_tree_add(cfqd, cfqq, 1); 3857 3858 cfqq->slice_end = 0; 3859 cfq_mark_cfqq_slice_new(cfqq); 3860} 3861 3862/* 3863 * Called when a new fs request (rq) is added (to cfqq). Check if there's 3864 * something we should do about it 3865 */ 3866static void 3867cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3868 struct request *rq) 3869{ 3870 struct cfq_io_cq *cic = RQ_CIC(rq); 3871 3872 cfqd->rq_queued++; 3873 if (rq->cmd_flags & REQ_PRIO) 3874 cfqq->prio_pending++; 3875 3876 cfq_update_io_thinktime(cfqd, cfqq, cic); 3877 cfq_update_io_seektime(cfqd, cfqq, rq); 3878 cfq_update_idle_window(cfqd, cfqq, cic); 3879 3880 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 3881 3882 if (cfqq == cfqd->active_queue) { 3883 /* 3884 * Remember that we saw a request from this process, but 3885 * don't start queuing just yet. Otherwise we risk seeing lots 3886 * of tiny requests, because we disrupt the normal plugging 3887 * and merging. If the request is already larger than a single 3888 * page, let it rip immediately. For that case we assume that 3889 * merging is already done. Ditto for a busy system that 3890 * has other work pending, don't risk delaying until the 3891 * idle timer unplug to continue working.
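 * (The "single page" test below is blk_rq_bytes(rq) > PAGE_CACHE_SIZE, so on a typical 4KB-page system anything over 4KB gets the queue kicked right away.)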
3892 */ 3893 if (cfq_cfqq_wait_request(cfqq)) { 3894 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 3895 cfqd->busy_queues > 1) { 3896 cfq_del_timer(cfqd, cfqq); 3897 cfq_clear_cfqq_wait_request(cfqq); 3898 __blk_run_queue(cfqd->queue); 3899 } else { 3900 cfqg_stats_update_idle_time(cfqq->cfqg); 3901 cfq_mark_cfqq_must_dispatch(cfqq); 3902 } 3903 } 3904 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { 3905 /* 3906 * not the active queue - expire current slice if it is 3907 * idle and has expired its mean thinktime or this new queue 3908 * has some old slice time left and is of higher priority or 3909 * this new queue is RT and the current one is BE 3910 */ 3911 cfq_preempt_queue(cfqd, cfqq); 3912 __blk_run_queue(cfqd->queue); 3913 } 3914} 3915 3916static void cfq_insert_request(struct request_queue *q, struct request *rq) 3917{ 3918 struct cfq_data *cfqd = q->elevator->elevator_data; 3919 struct cfq_queue *cfqq = RQ_CFQQ(rq); 3920 3921 cfq_log_cfqq(cfqd, cfqq, "insert_request"); 3922 cfq_init_prio_data(cfqq, RQ_CIC(rq)); 3923 3924 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); 3925 list_add_tail(&rq->queuelist, &cfqq->fifo); 3926 cfq_add_rq_rb(rq); 3927 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, 3928 rq->cmd_flags); 3929 cfq_rq_enqueued(cfqd, cfqq, rq); 3930} 3931 3932/* 3933 * Update hw_tag based on peak queue depth over 50 samples under 3934 * sufficient load. 3935 */ 3936static void cfq_update_hw_tag(struct cfq_data *cfqd) 3937{ 3938 struct cfq_queue *cfqq = cfqd->active_queue; 3939 3940 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth) 3941 cfqd->hw_tag_est_depth = cfqd->rq_in_driver; 3942 3943 if (cfqd->hw_tag == 1) 3944 return; 3945 3946 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && 3947 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN) 3948 return; 3949 3950 /* 3951 * If the active queue doesn't have enough requests and can idle, cfq might not 3952 * dispatch sufficient requests to hardware. Don't zero hw_tag in this 3953 * case 3954 */ 3955 if (cfqq && cfq_cfqq_idle_window(cfqq) && 3956 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] < 3957 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN) 3958 return; 3959 3960 if (cfqd->hw_tag_samples++ < 50) 3961 return; 3962 3963 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) 3964 cfqd->hw_tag = 1; 3965 else 3966 cfqd->hw_tag = 0; 3967} 3968 3969static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3970{ 3971 struct cfq_io_cq *cic = cfqd->active_cic; 3972 3973 /* If the queue already has requests, don't wait */ 3974 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 3975 return false; 3976 3977 /* If there are other queues in the group, don't wait */ 3978 if (cfqq->cfqg->nr_cfqq > 1) 3979 return false; 3980 3981 /* the only queue in the group, but think time is big */ 3982 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) 3983 return false; 3984 3985 if (cfq_slice_used(cfqq)) 3986 return true; 3987 3988 /* if slice left is less than think time, wait busy */ 3989 if (cic && sample_valid(cic->ttime.ttime_samples) 3990 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) 3991 return true; 3992 3993 /* 3994 * If the think time is less than a jiffy then ttime_mean=0 and the check above 3995 * will not be true. It might happen that slice has not expired yet 3996 * but will expire soon (4-5 ns) during select_queue(). To cover the 3997 * case where think time is less than a jiffy, mark the queue wait 3998 * busy if only 1 jiffy is left in the slice.
3999 */ 4000 if (cfqq->slice_end - jiffies == 1) 4001 return true; 4002 4003 return false; 4004} 4005 4006static void cfq_completed_request(struct request_queue *q, struct request *rq) 4007{ 4008 struct cfq_queue *cfqq = RQ_CFQQ(rq); 4009 struct cfq_data *cfqd = cfqq->cfqd; 4010 const int sync = rq_is_sync(rq); 4011 unsigned long now; 4012 4013 now = jiffies; 4014 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", 4015 !!(rq->cmd_flags & REQ_NOIDLE)); 4016 4017 cfq_update_hw_tag(cfqd); 4018 4019 WARN_ON(!cfqd->rq_in_driver); 4020 WARN_ON(!cfqq->dispatched); 4021 cfqd->rq_in_driver--; 4022 cfqq->dispatched--; 4023 (RQ_CFQG(rq))->dispatched--; 4024 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq), 4025 rq_io_start_time_ns(rq), rq->cmd_flags); 4026 4027 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; 4028 4029 if (sync) { 4030 struct cfq_rb_root *st; 4031 4032 RQ_CIC(rq)->ttime.last_end_request = now; 4033 4034 if (cfq_cfqq_on_rr(cfqq)) 4035 st = cfqq->service_tree; 4036 else 4037 st = st_for(cfqq->cfqg, cfqq_class(cfqq), 4038 cfqq_type(cfqq)); 4039 4040 st->ttime.last_end_request = now; 4041 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now)) 4042 cfqd->last_delayed_sync = now; 4043 } 4044 4045#ifdef CONFIG_CFQ_GROUP_IOSCHED 4046 cfqq->cfqg->ttime.last_end_request = now; 4047#endif 4048 4049 /* 4050 * If this is the active queue, check if it needs to be expired, 4051 * or if we want to idle in case it has no pending requests. 4052 */ 4053 if (cfqd->active_queue == cfqq) { 4054 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list); 4055 4056 if (cfq_cfqq_slice_new(cfqq)) { 4057 cfq_set_prio_slice(cfqd, cfqq); 4058 cfq_clear_cfqq_slice_new(cfqq); 4059 } 4060 4061 /* 4062 * Should we wait for next request to come in before we expire 4063 * the queue. 4064 */ 4065 if (cfq_should_wait_busy(cfqd, cfqq)) { 4066 unsigned long extend_sl = cfqd->cfq_slice_idle; 4067 if (!cfqd->cfq_slice_idle) 4068 extend_sl = cfqd->cfq_group_idle; 4069 cfqq->slice_end = jiffies + extend_sl; 4070 cfq_mark_cfqq_wait_busy(cfqq); 4071 cfq_log_cfqq(cfqd, cfqq, "will busy wait"); 4072 } 4073 4074 /* 4075 * Idling is not enabled on: 4076 * - expired queues 4077 * - idle-priority queues 4078 * - async queues 4079 * - queues with still some requests queued 4080 * - when there is a close cooperator 4081 */ 4082 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) 4083 cfq_slice_expired(cfqd, 1); 4084 else if (sync && cfqq_empty && 4085 !cfq_close_cooperator(cfqd, cfqq)) { 4086 cfq_arm_slice_timer(cfqd); 4087 } 4088 } 4089 4090 if (!cfqd->rq_in_driver) 4091 cfq_schedule_dispatch(cfqd); 4092} 4093 4094static inline int __cfq_may_queue(struct cfq_queue *cfqq) 4095{ 4096 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) { 4097 cfq_mark_cfqq_must_alloc_slice(cfqq); 4098 return ELV_MQUEUE_MUST; 4099 } 4100 4101 return ELV_MQUEUE_MAY; 4102} 4103 4104static int cfq_may_queue(struct request_queue *q, int rw) 4105{ 4106 struct cfq_data *cfqd = q->elevator->elevator_data; 4107 struct task_struct *tsk = current; 4108 struct cfq_io_cq *cic; 4109 struct cfq_queue *cfqq; 4110 4111 /* 4112 * don't force setup of a queue from here, as a call to may_queue 4113 * does not necessarily imply that a request actually will be queued. 
4114 * so just lookup a possibly existing queue, or return 'may queue' 4115 * if that fails 4116 */ 4117 cic = cfq_cic_lookup(cfqd, tsk->io_context); 4118 if (!cic) 4119 return ELV_MQUEUE_MAY; 4120 4121 cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); 4122 if (cfqq) { 4123 cfq_init_prio_data(cfqq, cic); 4124 4125 return __cfq_may_queue(cfqq); 4126 } 4127 4128 return ELV_MQUEUE_MAY; 4129} 4130 4131/* 4132 * queue lock held here 4133 */ 4134static void cfq_put_request(struct request *rq) 4135{ 4136 struct cfq_queue *cfqq = RQ_CFQQ(rq); 4137 4138 if (cfqq) { 4139 const int rw = rq_data_dir(rq); 4140 4141 BUG_ON(!cfqq->allocated[rw]); 4142 cfqq->allocated[rw]--; 4143 4144 /* Put down rq reference on cfqg */ 4145 cfqg_put(RQ_CFQG(rq)); 4146 rq->elv.priv[0] = NULL; 4147 rq->elv.priv[1] = NULL; 4148 4149 cfq_put_queue(cfqq); 4150 } 4151} 4152 4153static struct cfq_queue * 4154cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic, 4155 struct cfq_queue *cfqq) 4156{ 4157 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); 4158 cic_set_cfqq(cic, cfqq->new_cfqq, 1); 4159 cfq_mark_cfqq_coop(cfqq->new_cfqq); 4160 cfq_put_queue(cfqq); 4161 return cic_to_cfqq(cic, 1); 4162} 4163 4164/* 4165 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this 4166 * was the last process referring to said cfqq. 4167 */ 4168static struct cfq_queue * 4169split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq) 4170{ 4171 if (cfqq_process_refs(cfqq) == 1) { 4172 cfqq->pid = current->pid; 4173 cfq_clear_cfqq_coop(cfqq); 4174 cfq_clear_cfqq_split_coop(cfqq); 4175 return cfqq; 4176 } 4177 4178 cic_set_cfqq(cic, NULL, 1); 4179 4180 cfq_put_cooperator(cfqq); 4181 4182 cfq_put_queue(cfqq); 4183 return NULL; 4184} 4185/* 4186 * Allocate cfq data structures associated with this request. 4187 */ 4188static int 4189cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio, 4190 gfp_t gfp_mask) 4191{ 4192 struct cfq_data *cfqd = q->elevator->elevator_data; 4193 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq); 4194 const int rw = rq_data_dir(rq); 4195 const bool is_sync = rq_is_sync(rq); 4196 struct cfq_queue *cfqq; 4197 4198 might_sleep_if(gfp_mask & __GFP_WAIT); 4199 4200 spin_lock_irq(q->queue_lock); 4201 4202 check_ioprio_changed(cic, bio); 4203 check_blkcg_changed(cic, bio); 4204new_queue: 4205 cfqq = cic_to_cfqq(cic, is_sync); 4206 if (!cfqq || cfqq == &cfqd->oom_cfqq) { 4207 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask); 4208 cic_set_cfqq(cic, cfqq, is_sync); 4209 } else { 4210 /* 4211 * If the queue was seeky for too long, break it apart. 4212 */ 4213 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) { 4214 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); 4215 cfqq = split_cfqq(cic, cfqq); 4216 if (!cfqq) 4217 goto new_queue; 4218 } 4219 4220 /* 4221 * Check to see if this queue is scheduled to merge with 4222 * another, closely cooperating queue. The merging of 4223 * queues happens here as it must be done in process context. 4224 * The reference on new_cfqq was taken in merge_cfqqs. 
4225 */ 4226 if (cfqq->new_cfqq) 4227 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); 4228 } 4229 4230 cfqq->allocated[rw]++; 4231 4232 cfqq->ref++; 4233 cfqg_get(cfqq->cfqg); 4234 rq->elv.priv[0] = cfqq; 4235 rq->elv.priv[1] = cfqq->cfqg; 4236 spin_unlock_irq(q->queue_lock); 4237 return 0; 4238} 4239 4240static void cfq_kick_queue(struct work_struct *work) 4241{ 4242 struct cfq_data *cfqd = 4243 container_of(work, struct cfq_data, unplug_work); 4244 struct request_queue *q = cfqd->queue; 4245 4246 spin_lock_irq(q->queue_lock); 4247 __blk_run_queue(cfqd->queue); 4248 spin_unlock_irq(q->queue_lock); 4249} 4250 4251/* 4252 * Timer running if the active_queue is currently idling inside its time slice 4253 */ 4254static void cfq_idle_slice_timer(unsigned long data) 4255{ 4256 struct cfq_data *cfqd = (struct cfq_data *) data; 4257 struct cfq_queue *cfqq; 4258 unsigned long flags; 4259 int timed_out = 1; 4260 4261 cfq_log(cfqd, "idle timer fired"); 4262 4263 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 4264 4265 cfqq = cfqd->active_queue; 4266 if (cfqq) { 4267 timed_out = 0; 4268 4269 /* 4270 * We saw a request before the queue expired, let it through 4271 */ 4272 if (cfq_cfqq_must_dispatch(cfqq)) 4273 goto out_kick; 4274 4275 /* 4276 * expired 4277 */ 4278 if (cfq_slice_used(cfqq)) 4279 goto expire; 4280 4281 /* 4282 * only expire and reinvoke request handler, if there are 4283 * other queues with pending requests 4284 */ 4285 if (!cfqd->busy_queues) 4286 goto out_cont; 4287 4288 /* 4289 * not expired and it has a request pending, let it dispatch 4290 */ 4291 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 4292 goto out_kick; 4293 4294 /* 4295 * Queue depth flag is reset only when the idle didn't succeed 4296 */ 4297 cfq_clear_cfqq_deep(cfqq); 4298 } 4299expire: 4300 cfq_slice_expired(cfqd, timed_out); 4301out_kick: 4302 cfq_schedule_dispatch(cfqd); 4303out_cont: 4304 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 4305} 4306 4307static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 4308{ 4309 del_timer_sync(&cfqd->idle_slice_timer); 4310 cancel_work_sync(&cfqd->unplug_work); 4311} 4312 4313static void cfq_put_async_queues(struct cfq_data *cfqd) 4314{ 4315 int i; 4316 4317 for (i = 0; i < IOPRIO_BE_NR; i++) { 4318 if (cfqd->async_cfqq[0][i]) 4319 cfq_put_queue(cfqd->async_cfqq[0][i]); 4320 if (cfqd->async_cfqq[1][i]) 4321 cfq_put_queue(cfqd->async_cfqq[1][i]); 4322 } 4323 4324 if (cfqd->async_idle_cfqq) 4325 cfq_put_queue(cfqd->async_idle_cfqq); 4326} 4327 4328static void cfq_exit_queue(struct elevator_queue *e) 4329{ 4330 struct cfq_data *cfqd = e->elevator_data; 4331 struct request_queue *q = cfqd->queue; 4332 4333 cfq_shutdown_timer_wq(cfqd); 4334 4335 spin_lock_irq(q->queue_lock); 4336 4337 if (cfqd->active_queue) 4338 __cfq_slice_expired(cfqd, cfqd->active_queue, 0); 4339 4340 cfq_put_async_queues(cfqd); 4341 4342 spin_unlock_irq(q->queue_lock); 4343 4344 cfq_shutdown_timer_wq(cfqd); 4345 4346#ifdef CONFIG_CFQ_GROUP_IOSCHED 4347 blkcg_deactivate_policy(q, &blkcg_policy_cfq); 4348#else 4349 kfree(cfqd->root_group); 4350#endif 4351 kfree(cfqd); 4352} 4353 4354static int cfq_init_queue(struct request_queue *q) 4355{ 4356 struct cfq_data *cfqd; 4357 struct blkcg_gq *blkg __maybe_unused; 4358 int i, ret; 4359 4360 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 4361 if (!cfqd) 4362 return -ENOMEM; 4363 4364 cfqd->queue = q; 4365 q->elevator->elevator_data = cfqd; 4366 4367 /* Init root service tree */ 4368 cfqd->grp_service_tree = CFQ_RB_ROOT; 4369 4370 /* Init root group 
and prefer root group over other groups by default */ 4371#ifdef CONFIG_CFQ_GROUP_IOSCHED 4372 ret = blkcg_activate_policy(q, &blkcg_policy_cfq); 4373 if (ret) 4374 goto out_free; 4375 4376 cfqd->root_group = blkg_to_cfqg(q->root_blkg); 4377#else 4378 ret = -ENOMEM; 4379 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group), 4380 GFP_KERNEL, cfqd->queue->node); 4381 if (!cfqd->root_group) 4382 goto out_free; 4383 4384 cfq_init_cfqg_base(cfqd->root_group); 4385#endif 4386 cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT; 4387 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT; 4388 4389 /* 4390 * Not strictly needed (since RB_ROOT just clears the node and we 4391 * zeroed cfqd on alloc), but better be safe in case someone decides 4392 * to add magic to the rb code 4393 */ 4394 for (i = 0; i < CFQ_PRIO_LISTS; i++) 4395 cfqd->prio_trees[i] = RB_ROOT; 4396 4397 /* 4398 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues. 4399 * Grab a permanent reference to it, so that the normal code flow 4400 * will not attempt to free it. oom_cfqq is linked to root_group 4401 * but shouldn't hold a reference as it'll never be unlinked. Lose 4402 * the reference from linking right away. 4403 */ 4404 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); 4405 cfqd->oom_cfqq.ref++; 4406 4407 spin_lock_irq(q->queue_lock); 4408 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group); 4409 cfqg_put(cfqd->root_group); 4410 spin_unlock_irq(q->queue_lock); 4411 4412 init_timer(&cfqd->idle_slice_timer); 4413 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 4414 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 4415 4416 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 4417 4418 cfqd->cfq_quantum = cfq_quantum; 4419 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; 4420 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; 4421 cfqd->cfq_back_max = cfq_back_max; 4422 cfqd->cfq_back_penalty = cfq_back_penalty; 4423 cfqd->cfq_slice[0] = cfq_slice_async; 4424 cfqd->cfq_slice[1] = cfq_slice_sync; 4425 cfqd->cfq_target_latency = cfq_target_latency; 4426 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 4427 cfqd->cfq_slice_idle = cfq_slice_idle; 4428 cfqd->cfq_group_idle = cfq_group_idle; 4429 cfqd->cfq_latency = 1; 4430 cfqd->hw_tag = -1; 4431 /* 4432 * we optimistically start assuming sync ops weren't delayed in last 4433 * second, in order to have larger depth for async operations. 
4434 */ 4435 cfqd->last_delayed_sync = jiffies - HZ; 4436 return 0; 4437 4438out_free: 4439 kfree(cfqd); 4440 return ret; 4441} 4442 4443/* 4444 * sysfs parts below --> 4445 */ 4446static ssize_t 4447cfq_var_show(unsigned int var, char *page) 4448{ 4449 return sprintf(page, "%d\n", var); 4450} 4451 4452static ssize_t 4453cfq_var_store(unsigned int *var, const char *page, size_t count) 4454{ 4455 char *p = (char *) page; 4456 4457 *var = simple_strtoul(p, &p, 10); 4458 return count; 4459} 4460 4461#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ 4462static ssize_t __FUNC(struct elevator_queue *e, char *page) \ 4463{ \ 4464 struct cfq_data *cfqd = e->elevator_data; \ 4465 unsigned int __data = __VAR; \ 4466 if (__CONV) \ 4467 __data = jiffies_to_msecs(__data); \ 4468 return cfq_var_show(__data, (page)); \ 4469} 4470SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); 4471SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); 4472SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); 4473SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); 4474SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); 4475SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); 4476SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1); 4477SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 4478SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 4479SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 4480SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); 4481SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1); 4482#undef SHOW_FUNCTION 4483 4484#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 4485static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ 4486{ \ 4487 struct cfq_data *cfqd = e->elevator_data; \ 4488 unsigned int __data; \ 4489 int ret = cfq_var_store(&__data, (page), count); \ 4490 if (__data < (MIN)) \ 4491 __data = (MIN); \ 4492 else if (__data > (MAX)) \ 4493 __data = (MAX); \ 4494 if (__CONV) \ 4495 *(__PTR) = msecs_to_jiffies(__data); \ 4496 else \ 4497 *(__PTR) = __data; \ 4498 return ret; \ 4499} 4500STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 4501STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, 4502 UINT_MAX, 1); 4503STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, 4504 UINT_MAX, 1); 4505STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 4506STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, 4507 UINT_MAX, 0); 4508STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 4509STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1); 4510STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 4511STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 4512STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 4513 UINT_MAX, 0); 4514STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); 4515STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1); 4516#undef STORE_FUNCTION 4517 4518#define CFQ_ATTR(name) \ 4519 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) 4520 4521static struct elv_fs_entry cfq_attrs[] = { 4522 CFQ_ATTR(quantum), 4523 CFQ_ATTR(fifo_expire_sync), 4524 CFQ_ATTR(fifo_expire_async), 4525 CFQ_ATTR(back_seek_max), 4526 CFQ_ATTR(back_seek_penalty), 4527 
CFQ_ATTR(slice_sync), 4528 CFQ_ATTR(slice_async), 4529 CFQ_ATTR(slice_async_rq), 4530 CFQ_ATTR(slice_idle), 4531 CFQ_ATTR(group_idle), 4532 CFQ_ATTR(low_latency), 4533 CFQ_ATTR(target_latency), 4534 __ATTR_NULL 4535}; 4536 4537static struct elevator_type iosched_cfq = { 4538 .ops = { 4539 .elevator_merge_fn = cfq_merge, 4540 .elevator_merged_fn = cfq_merged_request, 4541 .elevator_merge_req_fn = cfq_merged_requests, 4542 .elevator_allow_merge_fn = cfq_allow_merge, 4543 .elevator_bio_merged_fn = cfq_bio_merged, 4544 .elevator_dispatch_fn = cfq_dispatch_requests, 4545 .elevator_add_req_fn = cfq_insert_request, 4546 .elevator_activate_req_fn = cfq_activate_request, 4547 .elevator_deactivate_req_fn = cfq_deactivate_request, 4548 .elevator_completed_req_fn = cfq_completed_request, 4549 .elevator_former_req_fn = elv_rb_former_request, 4550 .elevator_latter_req_fn = elv_rb_latter_request, 4551 .elevator_init_icq_fn = cfq_init_icq, 4552 .elevator_exit_icq_fn = cfq_exit_icq, 4553 .elevator_set_req_fn = cfq_set_request, 4554 .elevator_put_req_fn = cfq_put_request, 4555 .elevator_may_queue_fn = cfq_may_queue, 4556 .elevator_init_fn = cfq_init_queue, 4557 .elevator_exit_fn = cfq_exit_queue, 4558 }, 4559 .icq_size = sizeof(struct cfq_io_cq), 4560 .icq_align = __alignof__(struct cfq_io_cq), 4561 .elevator_attrs = cfq_attrs, 4562 .elevator_name = "cfq", 4563 .elevator_owner = THIS_MODULE, 4564}; 4565 4566#ifdef CONFIG_CFQ_GROUP_IOSCHED 4567static struct blkcg_policy blkcg_policy_cfq = { 4568 .pd_size = sizeof(struct cfq_group), 4569 .cftypes = cfq_blkcg_files, 4570 4571 .pd_init_fn = cfq_pd_init, 4572 .pd_offline_fn = cfq_pd_offline, 4573 .pd_reset_stats_fn = cfq_pd_reset_stats, 4574}; 4575#endif 4576 4577static int __init cfq_init(void) 4578{ 4579 int ret; 4580 4581 /* 4582 * could be 0 on HZ < 1000 setups 4583 */ 4584 if (!cfq_slice_async) 4585 cfq_slice_async = 1; 4586 if (!cfq_slice_idle) 4587 cfq_slice_idle = 1; 4588 4589#ifdef CONFIG_CFQ_GROUP_IOSCHED 4590 if (!cfq_group_idle) 4591 cfq_group_idle = 1; 4592 4593 ret = blkcg_policy_register(&blkcg_policy_cfq); 4594 if (ret) 4595 return ret; 4596#else 4597 cfq_group_idle = 0; 4598#endif 4599 4600 ret = -ENOMEM; 4601 cfq_pool = KMEM_CACHE(cfq_queue, 0); 4602 if (!cfq_pool) 4603 goto err_pol_unreg; 4604 4605 ret = elv_register(&iosched_cfq); 4606 if (ret) 4607 goto err_free_pool; 4608 4609 return 0; 4610 4611err_free_pool: 4612 kmem_cache_destroy(cfq_pool); 4613err_pol_unreg: 4614#ifdef CONFIG_CFQ_GROUP_IOSCHED 4615 blkcg_policy_unregister(&blkcg_policy_cfq); 4616#endif 4617 return ret; 4618} 4619 4620static void __exit cfq_exit(void) 4621{ 4622#ifdef CONFIG_CFQ_GROUP_IOSCHED 4623 blkcg_policy_unregister(&blkcg_policy_cfq); 4624#endif 4625 elv_unregister(&iosched_cfq); 4626 kmem_cache_destroy(cfq_pool); 4627} 4628 4629module_init(cfq_init); 4630module_exit(cfq_exit); 4631 4632MODULE_AUTHOR("Jens Axboe"); 4633MODULE_LICENSE("GPL"); 4634MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler"); 4635
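/*
 * For reference, a sketch (illustration only, not part of the build) of
 * what the SHOW_FUNCTION()/STORE_FUNCTION() macros above generate for one
 * representative time-based tunable, slice_idle, with the constant
 * arguments substituted and the always-true/always-false branches folded
 * away:
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_slice_idle;
 *
 *		__data = jiffies_to_msecs(__data);		(__CONV == 1)
 *		return cfq_var_show(__data, (page));
 *	}
 *
 *	static ssize_t cfq_slice_idle_store(struct elevator_queue *e,
 *					    const char *page, size_t count)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data;
 *		int ret = cfq_var_store(&__data, (page), count);
 *
 *		(the MIN/MAX clamp is a no-op for the 0..UINT_MAX range)
 *		cfqd->cfq_slice_idle = msecs_to_jiffies(__data);	(__CONV == 1)
 *		return ret;
 *	}
 *
 * Tunables declared with __CONV == 1 are thus read and written in
 * milliseconds and stored internally in jiffies; the attributes are
 * exposed through the CFQ_ATTR() entries in cfq_attrs, typically as files
 * such as /sys/block/<dev>/queue/iosched/slice_idle.
 */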