backing-dev.c revision d46db3d58233be4be980eb1e42eebe7808bcabab
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	if (wb1 < wb2) {
		spin_lock(&wb1->list_lock);
		spin_lock_nested(&wb2->list_lock, 1);
	} else {
		spin_lock(&wb2->list_lock);
		spin_lock_nested(&wb1->list_lock, 1);
	}
}

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback: %8lu kB\n"
		   "BdiReclaimable: %8lu kB\n"
		   "BdiDirtyThresh: %8lu kB\n"
		   "DirtyThresh: %8lu kB\n"
		   "BackgroundThresh: %8lu kB\n"
		   "b_dirty: %8lu\n"
		   "b_io: %8lu\n"
		   "b_more_io: %8lu\n"
		   "bdi_list: %8u\n"
		   "state: %8lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh), K(dirty_thresh),
		   K(background_thresh), nr_dirty, nr_io, nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi,
			       const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	bdi_arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");
	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback thread individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

void bdi_arm_supers_timer(void)
{
	unsigned long next;

	if (!dirty_writeback_interval)
		return;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	bdi_arm_supers_timer();
}

static void wakeup_timer_fn(unsigned long data)
{
	struct backing_dev_info *bdi = (struct backing_dev_info *)data;

	spin_lock_bh(&bdi->wb_lock);
	if (bdi->wb.task) {
		trace_writeback_wake_thread(bdi);
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * When bdi tasks are inactive for a long time, they are
		 * killed. In this case we have to wake up the forker thread
		 * which should create and run the bdi thread.
		 */
		trace_writeback_wake_forker_thread(bdi);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}

/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
	unsigned long interval;

	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
	return max(5UL * 60 * HZ, interval);
}

static int bdi_forker_thread(void *ptr)
{
	struct bdi_writeback *me = ptr;

	current->flags |= PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	for (;;) {
		struct task_struct *task = NULL;
		struct backing_dev_info *bdi;
		enum {
			NO_ACTION,   /* Nothing to do */
			FORK_THREAD, /* Fork bdi thread */
			KILL_THREAD, /* Kill inactive bdi thread */
		} action = NO_ACTION;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
			del_timer(&me->wakeup_timer);
			wb_do_writeback(me, 0);
		}

		spin_lock_bh(&bdi_lock);
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry(bdi, &bdi_list, bdi_list) {
			bool have_dirty_io;

			if (!bdi_cap_writeback_dirty(bdi) ||
			     bdi_cap_flush_forker(bdi))
				continue;

			WARN(!test_bit(BDI_registered, &bdi->state),
			     "bdi %p/%s is not registered!\n", bdi, bdi->name);

			have_dirty_io = !list_empty(&bdi->work_list) ||
					wb_has_dirty_io(&bdi->wb);

			/*
			 * If the bdi has work to do, but the thread does not
			 * exist - create it.
			 */
			if (!bdi->wb.task && have_dirty_io) {
				/*
				 * Set the pending bit - if someone will try to
				 * unregister this bdi - it'll wait on this bit.
				 */
				set_bit(BDI_pending, &bdi->state);
				action = FORK_THREAD;
				break;
			}

			spin_lock(&bdi->wb_lock);

			/*
			 * If there is no work to do and the bdi thread was
			 * inactive long enough - kill it. The wb_lock is taken
			 * to make sure no one adds more work to this bdi and
			 * wakes the bdi thread up.
			 */
			if (bdi->wb.task && !have_dirty_io &&
			    time_after(jiffies, bdi->wb.last_active +
						bdi_longest_inactive())) {
				task = bdi->wb.task;
				bdi->wb.task = NULL;
				spin_unlock(&bdi->wb_lock);
				set_bit(BDI_pending, &bdi->state);
				action = KILL_THREAD;
				break;
			}
			spin_unlock(&bdi->wb_lock);
		}
		spin_unlock_bh(&bdi_lock);

		/* Keep working if default bdi still has things to do */
		if (!list_empty(&me->bdi->work_list))
			__set_current_state(TASK_RUNNING);

		switch (action) {
		case FORK_THREAD:
			__set_current_state(TASK_RUNNING);
			task = kthread_create(bdi_writeback_thread, &bdi->wb,
					      "flush-%s", dev_name(bdi->dev));
			if (IS_ERR(task)) {
				/*
				 * If thread creation fails, force writeout of
				 * the bdi from the thread. Hopefully 1024 is
				 * large enough for efficient IO.
				 */
				writeback_inodes_wb(&bdi->wb, 1024);
			} else {
				/*
				 * The spinlock makes sure we do not lose
				 * wake-ups when racing with 'bdi_queue_work()'.
				 * And as soon as the bdi thread is visible, we
				 * can start it.
				 */
				spin_lock_bh(&bdi->wb_lock);
				bdi->wb.task = task;
				spin_unlock_bh(&bdi->wb_lock);
				wake_up_process(task);
			}
			break;

		case KILL_THREAD:
			__set_current_state(TASK_RUNNING);
			kthread_stop(task);
			break;

		case NO_ACTION:
			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
				/*
				 * There is no dirty data. The only thing we
				 * should now care about is checking for
				 * inactive bdi threads and killing them. Thus,
				 * let's sleep for a longer time, save energy
				 * and be friendly to battery-powered devices.
				 */
				schedule_timeout(bdi_longest_inactive());
			else
				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
			try_to_freeze();
			/* Back to the main loop */
			continue;
		}

		/*
		 * Clear pending bit and wakeup anybody waiting to tear us down.
		 */
		clear_bit(BDI_pending, &bdi->state);
		smp_mb__after_clear_bit();
		wake_up_bit(&bdi->state, BDI_pending);
	}

	return 0;
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
				       dev_name(dev));
		if (IS_ERR(wb->task))
			return PTR_ERR(wb->task);
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Finally, kill the kernel thread. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility. Force
	 * unfreeze of the thread before calling kthread_stop(), otherwise
	 * it would never exit if it is currently stuck in the refrigerator.
	 */
	if (bdi->wb.task) {
		thaw_process(bdi->wb.task);
		kthread_stop(bdi->wb.task);
	}
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = &default_backing_dev_info;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);
		del_timer_sync(&bdi->wb.wakeup_timer);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	spin_lock_init(&wb->list_lock);
	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		bdi_lock_two(&bdi->wb, dst);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&bdi->wb.list_lock);
		spin_unlock(&dst->list_lock);
	}

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (test_and_clear_bit(bit, &bdi->state))
		atomic_dec(&nr_bdi_congested[sync]);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (!test_and_set_bit(bit, &bdi->state))
		atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @zone has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of zone congestion, this function calls cond_resched() to
 * yield the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
			!zone_is_reclaim_congested(zone)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
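A minimal usage sketch, not part of this file: the comment above bdi_setup_and_register() says it exists so filesystems can quickly init and register a bdi, so a caller would typically embed a backing_dev_info in its own state and pair that helper with bdi_destroy() on teardown. The foo_device structure and foo_* function names below are hypothetical, and BDI_CAP_MAP_COPY is chosen only because default_backing_dev_info above uses it; a real caller picks capabilities to match its needs.

/* Illustrative sketch only -- foo_device and the foo_* helpers are hypothetical. */
struct foo_device {
	struct backing_dev_info bdi;
};

static int foo_bdi_setup(struct foo_device *fd)
{
	/*
	 * bdi_init() + bdi_register() in one call; the bdi shows up as
	 * "foo-<N>" using the bdi_seq counter defined at the top of this file.
	 */
	return bdi_setup_and_register(&fd->bdi, "foo", BDI_CAP_MAP_COPY);
}

static void foo_bdi_teardown(struct foo_device *fd)
{
	/* Unregisters the device and frees the per-cpu bdi_stat counters. */
	bdi_destroy(&fd->bdi);
}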