dm-raid1.c revision 9c4376de98719d2768dd919553843de34bb094a6
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum values, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of the default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm_io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/


static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, ret);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio->bi_rw & REQ_DISCARD) {
		io_req.bi_rw |= REQ_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_rw & REQ_FLUSH) ||
		    (bio->bi_rw & REQ_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If we have been told to handle errors, hold the bio
		 * and wait for userspace to deal with the problem.
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (!get_valid_mirror(ms))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	ms->kmirrord_wq = alloc_workqueue("kmirrord",
					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work_sync(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_rw & REQ_FLUSH))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list to start recovery waiting
	 * for bios in the hold list. After the process, no bio has
	 * a chance to be added in the hold list because ms->suspend
	 * is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	F => Flush - A flush failure occurred, mirror out-of-sync
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}


static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 1},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
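Usage note (illustrative, not part of the upstream file): a device-mapper table line for this target follows the constructor syntax documented above mirror_ctr(). With two hypothetical legs /dev/sda1 and /dev/sdb1, a 2097152-sector mirror using a core dirty log with a 1024-sector region size and error handling enabled might look like:

    0 2097152 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors

Here "core 1 1024" is the dirty log (log_type, #log_params, params) consumed by create_dirty_log(), "2 /dev/sda1 0 /dev/sdb1 0" is #mirrors followed by mirror_path/offset pairs handled by get_mirror(), and the trailing "1 handle_errors" is the optional feature block parsed by parse_features(); such a line would typically be loaded with dmsetup. Device paths, sizes, and region size are placeholders.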