dm-snap.c revision d698aa4500aa3ca9559142060caf0f79da998744
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Whether or not owning mapped_device is suspended */
	int suspended;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
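
/*
 * Editorial worked example (not in the original source): with a 16-sector
 * (8KiB) chunk, chunk_shift is 4, so chunk_to_sector() maps chunk 3 to
 * sector 48, and sector_to_chunk() maps any of sectors 48-63 back to
 * chunk 3.  The offset within a chunk is recovered separately via
 * store->chunk_mask, as remap_exception() does below.
 */
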
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}
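
/*
 * Editorial note: the tracked-chunk hash above records chunks that have
 * reads in flight to the origin through the snapshot target.
 * pending_complete() calls __check_for_conflicting_io() before making an
 * exception visible, so a chunk is never remapped to the COW device while
 * such a read is still outstanding.
 */
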
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes.  If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 *
 * Possible return values and states:
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	return 1;
}
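
/*
 * Editorial note: __validate_exception_handover() turns the state table
 * above into an answer for a new snapshot: an existing handover pairing
 * (count == 2, or a destination already waiting) is an error, while an
 * active snapshot already using this COW device with no destination means
 * the newcomer becomes the handover destination.
 */
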
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest first, smallest last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}
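
/*
 * Editorial worked example (not in the original source): suppose an entry
 * in the completed table maps old chunks 10-12 to new chunks 20-22,
 * stored as old_chunk 10, new_chunk 20, consecutive count 2.  Inserting
 * old 13 -> new 23 via dm_insert_exception() below just bumps the count
 * to 3 rather than allocating a new entry, and dm_lookup_exception()
 * matches any chunk in [old_chunk, old_chunk + consecutive count].
 */
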
/*
 * Return the exception data for a chunk, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}
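
/*
 * Editorial note: min_not_zero() treats zero as "unset": min_not_zero(0, 8)
 * and min_not_zero(8, 0) both yield 8, while min_not_zero(4, 8) yields 4.
 * __minimum_chunk_size() relies on this so its accumulator's initial 0
 * doesn't pin the minimum at zero.
 */
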
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
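
/*
 * Editorial sizing example (not in the original source): for a 4GiB
 * origin with 8KiB chunks, min(origin, cow) >> chunk_shift gives 512K
 * chunks.  calc_max_buckets() caps that at 2MB worth of list_heads
 * (128K buckets with 16-byte list_heads), and the result is rounded down
 * to a power of two.  The pending table is kept eight times smaller since
 * far fewer exceptions are in flight at any moment.
 */
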
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	s->suspended = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		r = -ENOMEM;
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	ti->private = s;
	ti->num_flush_requests = 1;

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered until snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}
	ti->split_io = s->store->chunk_size;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	kfree(s);

bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->split_io = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}
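
/*
 * Editorial note: __handover_exceptions() transfers the completed
 * exception table and exception store from the snapshot being replaced
 * (snap_src) to its successor (snap_dest) and then invalidates the
 * source.  snapshot_preresume() refuses to resume either device until
 * the source is suspended, so the swap in snapshot_resume() runs with no
 * I/O in flight on the source.
 */
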
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	flush_workqueue(ksnapd);

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}
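
/*
 * Editorial worked example (not in the original source): suppose a write
 * to the origin hits a chunk that three snapshots still need to copy.
 * __origin_write() picks one pending exception as primary_pe, queues the
 * bio on its origin_bios list, and takes a reference on primary_pe for
 * each sibling.  As each copy completes, put_pending_exception() drops
 * one reference; whoever drops the last one releases the queued origin
 * bios through pending_complete() -> flush_bios().
 */
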
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

 out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}
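
/*
 * Editorial note: start_copy() clamps src.count so that the final,
 * partial chunk of an origin whose size is not a multiple of the chunk
 * size is still copied correctly - e.g. with 16-sector chunks and a
 * 40-sector origin, the copy for chunk 2 covers only sectors 32-39.
 */
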
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
			 (bio->bi_sector &
			  s->store->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (unlikely(bio_empty_barrier(bio))) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}
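
/*
 * Editorial note: snapshot_map() and __origin_write() both drop the
 * snapshot lock around alloc_pending_exception() because the mempool
 * allocation may sleep.  After retaking the lock they must recheck
 * s->valid and redo the exception lookup, since another thread may have
 * completed or invalidated the exception in the meantime.
 */
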
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_postsuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->suspended = 1;
	up_write(&s->lock);
}

static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!snap_src->suspended) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	s->suspended = 0;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}
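
/*
 * Editorial example (not in the original source): for STATUSTYPE_INFO a
 * store that implements ->usage reports "<allocated>/<total> <metadata>"
 * in sectors, e.g. "16384/2097152 24"; userspace tools such as lvm2 turn
 * this into a percent-full figure.
 */
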
static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;

	return fn(ti, snap->origin, 0, ti->len, data);
}


/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			if (bio)
				bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}
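
/*
 * Editorial note: a DM_MAPIO_SUBMITTED return from __origin_write() tells
 * the origin target not to pass the bio down yet; the bio sits on the
 * primary_pe's origin_bios list and is issued by flush_bios() once every
 * snapshot that needs the old contents of the chunk has copied them out.
 */
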
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_empty_barrier(bio)))
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	down_read(&_origins_lock);

	ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));

	up_read(&_origins_lock);
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}
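
/*
 * Editorial note: because chunk sizes are powers of two, the split_io
 * set in origin_resume() (the minimum chunk size over all snapshots)
 * guarantees that dm core never hands this target a bio spanning a chunk
 * boundary of any snapshot, so __origin_write() can treat each bio as
 * touching exactly one chunk per snapshot.
 */
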
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.postsuspend = snapshot_postsuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.postsuspend = snapshot_postsuspend,
	.preresume = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
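
/*
 * Editorial note: at this revision the "snapshot-merge" target reuses the
 * ordinary snapshot callbacks wholesale.  dm_target_is_snapshot_merge()
 * distinguishes the two by comparing name pointers, which is safe because
 * the name string has a single definition within this module.
 */
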
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad_tracked_chunk_cache;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad_tracked_chunk_cache:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");