backing-dev.c revision 6bf05d03ea8d00bb53e0642f94a5a6815be2edb6
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
        struct inode *inode;

        nr_wb = nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&inode_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_list)
                nr_more_io++;
        spin_unlock(&inode_lock);

        get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback: %8lu kB\n"
                   "BdiReclaimable: %8lu kB\n"
                   "BdiDirtyThresh: %8lu kB\n"
                   "DirtyThresh: %8lu kB\n"
                   "BackgroundThresh: %8lu kB\n"
                   "b_dirty: %8lu\n"
                   "b_io: %8lu\n"
                   "b_more_io: %8lu\n"
                   "bdi_list: %8u\n"
                   "state: %8lx\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh), K(dirty_thresh),
                   K(background_thresh), nr_dirty, nr_io, nr_more_io,
                   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}
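/*
 * Editor's note: the "stats" file above uses the standard single_open()
 * seq_file pattern. A minimal illustrative sketch of the same idiom for a
 * hypothetical debugfs attribute follows; the example_* names are not part
 * of this file.
 */
#if 0
static int example_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;

        /* Print one per-bdi counter, converted from pages to kB. */
        seq_printf(m, "BdiWriteback: %8lu kB\n",
                   (unsigned long)(bdi_stat(bdi, BDI_WRITEBACK)
                                   << (PAGE_SHIFT - 10)));
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        /* i_private carries the bdi pointer given to debugfs_create_file() */
        return single_open(file, example_show, inode->i_private);
}
#endif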
static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned long read_ahead_kb;
        ssize_t ret = -EINVAL;

        read_ahead_kb = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
                ret = count;
        }
        return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_min_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_max_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_NULL,
};

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
        int err;

        sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
        BUG_ON(IS_ERR(sync_supers_tsk));

        setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
        bdi_arm_supers_timer();

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");

        return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}
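/*
 * Editor's note: the *_store() helpers earlier in this file share one parse
 * idiom: simple_strtoul() followed by a check that the input was exactly a
 * number with at most a trailing newline. A hedged sketch of the round-trip
 * for the read_ahead_kb attribute (the "1024" value is illustrative):
 *
 *   # echo 1024 > /sys/class/bdi/<dev>/read_ahead_kb
 *
 * which in effect performs:
 */
#if 0
        char *end;
        unsigned long read_ahead_kb = simple_strtoul("1024\n", &end, 10);

        /* kB -> pages: divide by (PAGE_SIZE / 1024) */
        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
#endif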
static void bdi_flush_io(struct backing_dev_info *bdi)
{
        struct writeback_control wbc = {
                .sync_mode              = WB_SYNC_NONE,
                .older_than_this        = NULL,
                .range_cyclic           = 1,
                .nr_to_write            = 1024,
        };

        writeback_inodes_wb(&bdi->wb, &wbc);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback thread individually.
 */
static int bdi_sync_supers(void *unused)
{
        set_user_nice(current, 0);

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();

                /*
                 * Do this periodically, like kupdated() did before.
                 */
                sync_supers();
        }

        return 0;
}

void bdi_arm_supers_timer(void)
{
        unsigned long next;

        if (!dirty_writeback_interval)
                return;

        next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
        mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
        wake_up_process(sync_supers_tsk);
        bdi_arm_supers_timer();
}

static void wakeup_timer_fn(unsigned long data)
{
        struct backing_dev_info *bdi = (struct backing_dev_info *)data;

        spin_lock_bh(&bdi->wb_lock);
        if (bdi->wb.task) {
                trace_writeback_wake_thread(bdi);
                wake_up_process(bdi->wb.task);
        } else {
                /*
                 * When bdi tasks are inactive for a long time, they are
                 * killed. In this case we have to wake up the forker thread
                 * which should create and run the bdi thread.
                 */
                trace_writeback_wake_forker_thread(bdi);
                wake_up_process(default_backing_dev_info.wb.task);
        }
        spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}
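/*
 * Editor's note: dirty_writeback_interval is in centisecs, so the timeout
 * above is interval * 10 milliseconds. A worked example, assuming the
 * default interval of 500 centisecs and HZ == 250 (both assumptions, not
 * taken from this file):
 */
#if 0
        /* 500 centisecs * 10 = 5000 ms; with HZ == 250 that is 1250 jiffies */
        unsigned long timeout = msecs_to_jiffies(500 * 10);

        mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
#endif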
/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
        unsigned long interval;

        interval = msecs_to_jiffies(dirty_writeback_interval * 10);
        return max(5UL * 60 * HZ, interval);
}
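/*
 * Editor's note: with the assumed defaults above, msecs_to_jiffies(5000) is
 * far smaller than 5UL * 60 * HZ, so idle bdi threads live for five minutes
 * before they may be reaped. The forker thread below reduces to a small
 * decision per bdi; an illustrative sketch (names mirror the real function):
 */
#if 0
        bool have_dirty_io = !list_empty(&bdi->work_list) ||
                             wb_has_dirty_io(&bdi->wb);

        if (!bdi->wb.task && have_dirty_io)
                action = FORK_THREAD;   /* work exists but no thread yet */
        else if (bdi->wb.task && !have_dirty_io &&
                 time_after(jiffies, bdi->wb.last_active +
                                     bdi_longest_inactive()))
                action = KILL_THREAD;   /* thread has idled for too long */
#endif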
static int bdi_forker_thread(void *ptr)
{
        struct bdi_writeback *me = ptr;

        current->flags |= PF_FLUSHER | PF_SWAPWRITE;
        set_freezable();

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(current, 0);

        for (;;) {
                struct task_struct *task = NULL;
                struct backing_dev_info *bdi;
                enum {
                        NO_ACTION,   /* Nothing to do */
                        FORK_THREAD, /* Fork bdi thread */
                        KILL_THREAD, /* Kill inactive bdi thread */
                } action = NO_ACTION;

                /*
                 * Temporary measure, we want to make sure we don't see
                 * dirty data on the default backing_dev_info
                 */
                if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
                        del_timer(&me->wakeup_timer);
                        wb_do_writeback(me, 0);
                }

                spin_lock_bh(&bdi_lock);
                set_current_state(TASK_INTERRUPTIBLE);

                list_for_each_entry(bdi, &bdi_list, bdi_list) {
                        bool have_dirty_io;

                        if (!bdi_cap_writeback_dirty(bdi) ||
                             bdi_cap_flush_forker(bdi))
                                continue;

                        WARN(!test_bit(BDI_registered, &bdi->state),
                             "bdi %p/%s is not registered!\n", bdi, bdi->name);

                        have_dirty_io = !list_empty(&bdi->work_list) ||
                                        wb_has_dirty_io(&bdi->wb);

                        /*
                         * If the bdi has work to do, but the thread does not
                         * exist - create it.
                         */
                        if (!bdi->wb.task && have_dirty_io) {
                                /*
                                 * Set the pending bit - if someone will try to
                                 * unregister this bdi - it'll wait on this bit.
                                 */
                                set_bit(BDI_pending, &bdi->state);
                                action = FORK_THREAD;
                                break;
                        }

                        spin_lock(&bdi->wb_lock);

                        /*
                         * If there is no work to do and the bdi thread was
                         * inactive long enough - kill it. The wb_lock is taken
                         * to make sure no one adds more work to this bdi and
                         * wakes the bdi thread up.
                         */
                        if (bdi->wb.task && !have_dirty_io &&
                            time_after(jiffies, bdi->wb.last_active +
                                                bdi_longest_inactive())) {
                                task = bdi->wb.task;
                                bdi->wb.task = NULL;
                                spin_unlock(&bdi->wb_lock);
                                set_bit(BDI_pending, &bdi->state);
                                action = KILL_THREAD;
                                break;
                        }
                        spin_unlock(&bdi->wb_lock);
                }
                spin_unlock_bh(&bdi_lock);

                /* Keep working if default bdi still has things to do */
                if (!list_empty(&me->bdi->work_list))
                        __set_current_state(TASK_RUNNING);

                switch (action) {
                case FORK_THREAD:
                        __set_current_state(TASK_RUNNING);
                        task = kthread_run(bdi_writeback_thread, &bdi->wb,
                                           "flush-%s", dev_name(bdi->dev));
                        if (IS_ERR(task)) {
                                /*
                                 * If thread creation fails, force writeout of
                                 * the bdi from the thread.
                                 */
                                bdi_flush_io(bdi);
                        } else {
                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
                                 */
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
                        }
                        break;

                case KILL_THREAD:
                        __set_current_state(TASK_RUNNING);
                        kthread_stop(task);
                        break;

                case NO_ACTION:
                        if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
                                /*
                                 * There is no dirty data. The only thing we
                                 * should now care about is checking for
                                 * inactive bdi threads and killing them. Thus,
                                 * let's sleep for a longer time, save energy
                                 * and be friendly to battery-powered devices.
                                 */
                                schedule_timeout(bdi_longest_inactive());
                        else
                                schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
                        try_to_freeze();
                        /* Back to the main loop */
                        continue;
                }

                /*
                 * Clear pending bit and wakeup anybody waiting to tear us down.
                 */
                clear_bit(BDI_pending, &bdi->state);
                smp_mb__after_clear_bit();
                wake_up_bit(&bdi->state, BDI_pending);
        }

        return 0;
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                 const char *fmt, ...)
{
        va_list args;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        bdi->dev = dev;

        /*
         * Just start the forker thread for our default backing_dev_info,
         * and add other bdi's to the list. They will get a thread created
         * on-demand when they need it.
         */
        if (bdi_cap_flush_forker(bdi)) {
                struct bdi_writeback *wb = &bdi->wb;

                wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
                                       dev_name(dev));
                if (IS_ERR(wb->task))
                        return PTR_ERR(wb->task);
        }

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(BDI_registered, &bdi->state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
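/*
 * Editor's note: a minimal sketch of how a block driver would typically use
 * the registration helpers above. The request queue and gendisk names are
 * illustrative assumptions, not part of this file:
 */
#if 0
        err = bdi_init(&q->backing_dev_info);
        if (err)
                return err;
        /* Registers under the "bdi" class as "MAJOR:MINOR", e.g. "8:0". */
        err = bdi_register_dev(&q->backing_dev_info, disk_devt(disk));
#endif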
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        bdi_remove_from_list(bdi);

        /*
         * If setup is pending, wait for that to complete first
         */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                    TASK_UNINTERRUPTIBLE);

        /*
         * Finally, kill the kernel thread. We don't need to be RCU
         * safe anymore, since the bdi is gone from visibility. Force
         * unfreeze of the thread before calling kthread_stop(), otherwise
         * it would never exit if it is currently stuck in the refrigerator.
         */
        if (bdi->wb.task) {
                thaw_process(bdi->wb.task);
                kthread_stop(bdi->wb.task);
        }
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (sb->s_bdi == bdi)
                        sb->s_bdi = NULL;
        }
        spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
        if (bdi->dev) {
                trace_writeback_bdi_unregister(bdi);
                bdi_prune_sb(bdi);
                del_timer_sync(&bdi->wb.wakeup_timer);

                if (!bdi_cap_flush_forker(bdi))
                        bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = PROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;
        err = prop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        /*
         * Splice our entries to the default_backing_dev_info, if this
         * bdi disappears
         */
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;

                spin_lock(&inode_lock);
                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                list_splice(&bdi->wb.b_io, &dst->b_io);
                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
                spin_unlock(&inode_lock);
        }

        bdi_unregister(bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
                           unsigned int cap)
{
        char tmp[32];
        int err;

        bdi->name = name;
        bdi->capabilities = cap;
        err = bdi_init(bdi);
        if (err)
                return err;

        sprintf(tmp, "%.28s%s", name, "-%d");
        err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
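/*
 * Editor's note: the two wait queues above are indexed by the 'sync' flag,
 * so sync and async waiters are woken independently. A hedged sketch of the
 * producer side, assuming the BLK_RW_ASYNC constant from <linux/blkdev.h>
 * (the trigger condition is illustrative):
 */
#if 0
        /* A block driver noticing its queue is backed up: */
        set_bdi_congested(bdi, BLK_RW_ASYNC);
        /* ... later, once the queue drains below its threshold ... */
        clear_bdi_congested(bdi, BLK_RW_ASYNC); /* wakes congestion_wait()ers */
#endif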
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        clear_bit(bit, &bdi->state);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);
        return ret;
}
EXPORT_SYMBOL(congestion_wait);
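/*
 * Editor's note: a hedged end-to-end sketch of the filesystem-side lifecycle
 * this file provides. The fictional "examplefs" names are illustrative and
 * not part of this revision:
 */
#if 0
static struct backing_dev_info examplefs_bdi;

static int examplefs_fill_super(struct super_block *sb)
{
        int err;

        /* Registers as "examplefs-<N>" using the bdi_seq counter above. */
        err = bdi_setup_and_register(&examplefs_bdi, "examplefs",
                                     BDI_CAP_MAP_COPY);
        if (err)
                return err;
        sb->s_bdi = &examplefs_bdi;
        return 0;
}

static void examplefs_kill_super(struct super_block *sb)
{
        /*
         * bdi_destroy() unregisters the device and splices any leftover
         * dirty inodes onto default_backing_dev_info.
         */
        bdi_destroy(&examplefs_bdi);
}
#endif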