dm-snap.c revision 4c6fff445d7aa753957856278d4d93bcad6e2c14
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct exception_table pending;
	struct exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};
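/*
 * Lifetime sketch for the snapshot-write path (a summary of the code
 * below, not additional mechanism): snapshot_map() allocates a pe with
 * snap->lock dropped, __find_pending_exception() fills it in and
 * hashes it into s->pending with ref_count 1, start_copy() hands the
 * chunk to kcopyd, and pending_complete() finally moves the mapping
 * into s->complete and releases the bios queued on the pe.  Writes to
 * the origin additionally group one pe per snapshot under a shared
 * primary_pe; see __origin_write().
 */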
/* Slab caches for the completed and pending exception objects */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
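/*
 * Why reads are tracked at all: a read of a not-yet-copied chunk is
 * passed straight through to the origin device.  If an origin write
 * then triggers a copy of that chunk, pending_complete() spins on
 * __chunk_is_tracked() until the read has finished, so a chunk is
 * never switched over to the COW device while a read of the old data
 * is still in flight.
 */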
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Hash table mapping origin volumes to lists of snapshots, and a lock
 * to protect it.  If we make the table the size of the minors list
 * then it should be nearly perfect.
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct dm_snapshot *l;
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < snap->store->chunk_size)
			break;
	list_add_tail(&snap->list, &l->list);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
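/*
 * Locking note: register_snapshot() and unregister_snapshot() take
 * _origins_lock for write; the I/O-side users (do_origin() and
 * origin_resume() below) only take it for read, so lookups from
 * concurrent origin writes do not serialise against each other.
 */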
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a chunk, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
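/*
 * Consecutive-chunk encoding, worked example (the field layout is
 * defined in dm-exception-store.h): the top DM_CHUNK_CONSECUTIVE_BITS
 * of e->new_chunk store a run length, so one dm_snap_exception with
 * old_chunk 10, dm_chunk_number(e->new_chunk) 20 and
 * dm_consecutive_chunk_count(e) 2 stands for the three mappings
 * 10->20, 11->21 and 12->22.  lookup_exception() above relies on this
 * when it range-checks old_chunk, and insert_completed_exception()
 * below extends such runs in both directions.
 */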
static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->store->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
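/*
 * Sizing example for init_hash_tables() (illustrative, assuming 64-bit
 * pointers so sizeof(struct list_head) == 16): calc_max_buckets()
 * allows 2MB / 16 = 131072 buckets.  A 4GiB origin with 8KiB chunks
 * has 524288 chunks, so the complete table is capped at 131072
 * buckets, and the pending table gets max(131072 >> 3, 64) = 16384.
 */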
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path;
	struct dm_exception_store *store;
	unsigned args_used;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad_args;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_args;
	}

	argv += args_used;
	argc -= args_used;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad_snap;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->store = store;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		r = -ENOMEM;
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		r = -EINVAL;
		goto bad_load_and_register;
	}

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->store->chunk_size;
	ti->num_flush_requests = 1;

	return 0;

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad_snap:
	dm_exception_store_destroy(store);

bad_args:
	return r;
}
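/*
 * Example table line matching the constructor above (device names and
 * sizes are illustrative): a persistent snapshot of a 1GiB origin with
 * 16-sector (8KiB) chunks could be created with
 *
 *	echo "0 2097152 snapshot /dev/vg/base /dev/vg/base-cow P 16" | \
 *		dmsetup create snap0
 */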
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->store->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}
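/*
 * Completion ordering: copy_callback() below runs in kcopyd context
 * and must not block, so the potentially blocking metadata update is
 * deferred to the exception store's commit_exception() method, which
 * calls commit_callback() once the exception has been committed; only
 * then does pending_complete() remap the queued snapshot bios onto the
 * COW device.
 */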
static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min(s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->store->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
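/*
 * Note the allocation convention used by both callers of
 * __find_pending_exception(): the pe is allocated with
 * alloc_pending_exception() while snap->lock is dropped (the mempool
 * allocation may block), then passed in under the lock; if another
 * thread raced us and already installed a pe for the chunk, ours is
 * freed and the existing one returned.
 */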
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->store->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
					 (bio->bi_sector &
					  s->store->chunk_mask);
}
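/*
 * remap_exception() arithmetic, worked example (values illustrative):
 * with an 8-sector chunk size (chunk_shift 3, chunk_mask 7), a bio
 * aimed at origin sector 27 lies in chunk 3 at offset 3.  If chunk 3
 * is remapped to COW chunk 5, the bio is redirected to sector
 * (5 << 3) + (27 & 7) = 43 of the COW device.
 */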
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (unlikely(bio_empty_barrier(bio))) {
		bio->bi_bdev = s->store->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
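/*
 * With the persistent store, "dmsetup status" on a live snapshot then
 * prints something like "0 2097152 snapshot 2056/1048576" (sectors of
 * the COW device used out of its total; values illustrative), or
 * "Invalid" once the snapshot has been invalidated.
 */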
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	down_write(&snap->lock);

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->fraction_full) {
				sector_t numerator, denominator;
				snap->store->type->fraction_full(snap->store,
								 &numerator,
								 &denominator);
				DMEMIT("%llu/%llu",
				       (unsigned long long)numerator,
				       (unsigned long long)denominator);
			} else
				DMEMIT("Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s", snap->origin->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	up_write(&snap->lock);

	return 0;
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;

	return fn(ti, snap->origin, 0, ti->len, data);
}
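/*
 * The rest of the file implements the "snapshot-origin" target.
 * Overview of an origin write: origin_map() forwards WRITEs to
 * do_origin(), which looks the device up in the _origins hash and
 * calls __origin_write().  If any snapshot still needs a copy of the
 * chunk being overwritten, the bio is queued on a primary_pe and only
 * released, via flush_bios(), once every such copy has completed;
 * otherwise it is remapped back to the origin immediately.
 */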
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}
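/*
 * ref_count walk-through for __origin_write(), hypothetical scenario:
 * an origin with two snapshots, neither of which has the chunk yet.
 * The first snapshot's pe becomes the primary_pe.  After the loop its
 * ref_count is 3: 1 from __find_pending_exception(), +1 when it adopts
 * itself as its own primary_pe, +1 taken on behalf of the second
 * snapshot's pe.  The atomic_dec_and_test() above drops the creation
 * reference that guarded the loop, and each copy completion drops one
 * more via put_pending_exception(); whoever reaches zero releases the
 * origin bio.
 */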
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_empty_barrier(bio)))
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};
static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r) {
		DMERR("origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad_tracked_chunk_cache;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_ksnapd;
	}

	return 0;

bad_ksnapd:
	kmem_cache_destroy(tracked_chunk_cache);
bad_tracked_chunk_cache:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");