dm-snap.c revision 35bf659b008e83e725dcd30f542e38461dbb867c
1/*
2 * dm-snapshot.c
3 *
4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5 *
6 * This file is released under the GPL.
7 */
8
9#include <linux/blkdev.h>
10#include <linux/ctype.h>
11#include <linux/device-mapper.h>
12#include <linux/delay.h>
13#include <linux/fs.h>
14#include <linux/init.h>
15#include <linux/kdev_t.h>
16#include <linux/list.h>
17#include <linux/mempool.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/vmalloc.h>
21#include <linux/log2.h>
22#include <linux/dm-kcopyd.h>
23
24#include "dm-exception-store.h"
25#include "dm-snap.h"
26#include "dm-bio-list.h"
27
28#define DM_MSG_PREFIX "snapshots"
29
30/*
31 * The percentage increment we will wake up users at
32 */
33#define WAKE_UP_PERCENT 5
34
35/*
36 * kcopyd priority of snapshot operations
37 */
38#define SNAPSHOT_COPY_PRIORITY 2
39
40/*
41 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
42 */
43#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
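/*
 * Worked example (illustrative): with 4KB pages (PAGE_SHIFT == 12) this
 * evaluates to (1 << 20) >> 12 = 256 pages; the "?: 1" falls back to a
 * single page should the shift ever yield 0.
 */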
44
45/*
46 * The size of the mempools used for pending exceptions and for tracking chunks in use.
47 */
48#define MIN_IOS 256
49
50static struct workqueue_struct *ksnapd;
51static void flush_queued_bios(struct work_struct *work);
52
53struct dm_snap_pending_exception {
54	struct dm_snap_exception e;
55
56	/*
57	 * Origin buffers waiting for this to complete are held
58	 * in a bio list
59	 */
60	struct bio_list origin_bios;
61	struct bio_list snapshot_bios;
62
63	/*
64	 * Short-term queue of pending exceptions prior to submission.
65	 */
66	struct list_head list;
67
68	/*
69	 * The primary pending_exception is the one that holds
70	 * the ref_count and the list of origin_bios for a
71	 * group of pending_exceptions.  It is always last to get freed.
72	 * These fields get set up when writing to the origin.
73	 */
74	struct dm_snap_pending_exception *primary_pe;
75
76	/*
77	 * Number of pending_exceptions processing this chunk.
78	 * When this drops to zero we must complete the origin bios.
79	 * If incrementing or decrementing this, hold pe->snap->lock for
80	 * the sibling concerned and not pe->primary_pe->snap->lock unless
81	 * they are the same.
82	 */
83	atomic_t ref_count;
84
85	/* Pointer back to snapshot context */
86	struct dm_snapshot *snap;
87
88	/*
89	 * 1 indicates the exception has already been sent to
90	 * kcopyd.
91	 */
92	int started;
93};
94
95/*
96 * Slab caches used to allocate completed (dm_snap_exception) and
97 * pending (dm_snap_pending_exception) exception structures
98 */
99static struct kmem_cache *exception_cache;
100static struct kmem_cache *pending_cache;
101
102struct dm_snap_tracked_chunk {
103	struct hlist_node node;
104	chunk_t chunk;
105};
106
107static struct kmem_cache *tracked_chunk_cache;
108
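/*
 * Reads redirected to the origin are recorded in a per-snapshot hash so
 * that pending_complete() can use __chunk_is_tracked() to wait for any
 * read still in flight on a chunk before publishing its new mapping.
 */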
109static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
110						 chunk_t chunk)
111{
112	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
113							GFP_NOIO);
114	unsigned long flags;
115
116	c->chunk = chunk;
117
118	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
119	hlist_add_head(&c->node,
120		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
121	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
122
123	return c;
124}
125
126static void stop_tracking_chunk(struct dm_snapshot *s,
127				struct dm_snap_tracked_chunk *c)
128{
129	unsigned long flags;
130
131	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
132	hlist_del(&c->node);
133	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
134
135	mempool_free(c, s->tracked_chunk_pool);
136}
137
138static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
139{
140	struct dm_snap_tracked_chunk *c;
141	struct hlist_node *hn;
142	int found = 0;
143
144	spin_lock_irq(&s->tracked_chunk_lock);
145
146	hlist_for_each_entry(c, hn,
147	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
148		if (c->chunk == chunk) {
149			found = 1;
150			break;
151		}
152	}
153
154	spin_unlock_irq(&s->tracked_chunk_lock);
155
156	return found;
157}
158
159/*
160 * One of these per registered origin, held in the snapshot_origins hash
161 */
162struct origin {
163	/* The origin device */
164	struct block_device *bdev;
165
166	struct list_head hash_list;
167
168	/* List of snapshots for this origin */
169	struct list_head snapshots;
170};
171
172/*
173 * Size of the hash table for origin volumes. If we make this
174 * the size of the minors list then it should be nearly perfect
175 */
176#define ORIGIN_HASH_SIZE 256
177#define ORIGIN_MASK      0xFF
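/*
 * origin_hash() below keys on the low eight bits of the origin device's
 * dev_t, so ORIGIN_MASK must always be ORIGIN_HASH_SIZE - 1.
 */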
178static struct list_head *_origins;
179static struct rw_semaphore _origins_lock;
180
181static int init_origin_hash(void)
182{
183	int i;
184
185	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
186			   GFP_KERNEL);
187	if (!_origins) {
188		DMERR("unable to allocate memory");
189		return -ENOMEM;
190	}
191
192	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
193		INIT_LIST_HEAD(_origins + i);
194	init_rwsem(&_origins_lock);
195
196	return 0;
197}
198
199static void exit_origin_hash(void)
200{
201	kfree(_origins);
202}
203
204static unsigned origin_hash(struct block_device *bdev)
205{
206	return bdev->bd_dev & ORIGIN_MASK;
207}
208
209static struct origin *__lookup_origin(struct block_device *origin)
210{
211	struct list_head *ol;
212	struct origin *o;
213
214	ol = &_origins[origin_hash(origin)];
215	list_for_each_entry (o, ol, hash_list)
216		if (bdev_equal(o->bdev, origin))
217			return o;
218
219	return NULL;
220}
221
222static void __insert_origin(struct origin *o)
223{
224	struct list_head *sl = &_origins[origin_hash(o->bdev)];
225	list_add_tail(&o->hash_list, sl);
226}
227
228/*
229 * Make a note of the snapshot and its origin so we can look it
230 * up when the origin has a write on it.
231 */
232static int register_snapshot(struct dm_snapshot *snap)
233{
234	struct origin *o, *new_o;
235	struct block_device *bdev = snap->origin->bdev;
236
237	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
238	if (!new_o)
239		return -ENOMEM;
240
241	down_write(&_origins_lock);
242	o = __lookup_origin(bdev);
243
244	if (o)
245		kfree(new_o);
246	else {
247		/* New origin */
248		o = new_o;
249
250		/* Initialise the struct */
251		INIT_LIST_HEAD(&o->snapshots);
252		o->bdev = bdev;
253
254		__insert_origin(o);
255	}
256
257	list_add_tail(&snap->list, &o->snapshots);
258
259	up_write(&_origins_lock);
260	return 0;
261}
262
263static void unregister_snapshot(struct dm_snapshot *s)
264{
265	struct origin *o;
266
267	down_write(&_origins_lock);
268	o = __lookup_origin(s->origin->bdev);
269
270	list_del(&s->list);
271	if (list_empty(&o->snapshots)) {
272		list_del(&o->hash_list);
273		kfree(o);
274	}
275
276	up_write(&_origins_lock);
277}
278
279/*
280 * Implementation of the exception hash tables.
281 * The lowest hash_shift bits of the chunk number are ignored, allowing
282 * some consecutive chunks to be grouped together.
283 */
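/*
 * Worked example (illustrative): with hash_shift == 2, chunk numbers 8,
 * 9, 10 and 11 all hash to bucket 2, so runs of consecutive chunks land
 * in the same slot and can later be merged into a single
 * dm_snap_exception by insert_completed_exception().
 */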
284static int init_exception_table(struct exception_table *et, uint32_t size,
285				unsigned hash_shift)
286{
287	unsigned int i;
288
289	et->hash_shift = hash_shift;
290	et->hash_mask = size - 1;
291	et->table = dm_vcalloc(size, sizeof(struct list_head));
292	if (!et->table)
293		return -ENOMEM;
294
295	for (i = 0; i < size; i++)
296		INIT_LIST_HEAD(et->table + i);
297
298	return 0;
299}
300
301static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
302{
303	struct list_head *slot;
304	struct dm_snap_exception *ex, *next;
305	int i, size;
306
307	size = et->hash_mask + 1;
308	for (i = 0; i < size; i++) {
309		slot = et->table + i;
310
311		list_for_each_entry_safe (ex, next, slot, hash_list)
312			kmem_cache_free(mem, ex);
313	}
314
315	vfree(et->table);
316}
317
318static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
319{
320	return (chunk >> et->hash_shift) & et->hash_mask;
321}
322
323static void insert_exception(struct exception_table *eh,
324			     struct dm_snap_exception *e)
325{
326	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
327	list_add(&e->hash_list, l);
328}
329
330static void remove_exception(struct dm_snap_exception *e)
331{
332	list_del(&e->hash_list);
333}
334
335/*
336 * Return the exception data for a sector, or NULL if not
337 * remapped.
338 */
339static struct dm_snap_exception *lookup_exception(struct exception_table *et,
340						  chunk_t chunk)
341{
342	struct list_head *slot;
343	struct dm_snap_exception *e;
344
345	slot = &et->table[exception_hash(et, chunk)];
346	list_for_each_entry (e, slot, hash_list)
347		if (chunk >= e->old_chunk &&
348		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
349			return e;
350
351	return NULL;
352}
353
354static struct dm_snap_exception *alloc_exception(void)
355{
356	struct dm_snap_exception *e;
357
358	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
359	if (!e)
360		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
361
362	return e;
363}
364
365static void free_exception(struct dm_snap_exception *e)
366{
367	kmem_cache_free(exception_cache, e);
368}
369
370static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
371{
372	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
373							     GFP_NOIO);
374
375	atomic_inc(&s->pending_exceptions_count);
376	pe->snap = s;
377
378	return pe;
379}
380
381static void free_pending_exception(struct dm_snap_pending_exception *pe)
382{
383	struct dm_snapshot *s = pe->snap;
384
385	mempool_free(pe, s->pending_pool);
386	smp_mb__before_atomic_dec();
387	atomic_dec(&s->pending_exceptions_count);
388}
389
390static void insert_completed_exception(struct dm_snapshot *s,
391				       struct dm_snap_exception *new_e)
392{
393	struct exception_table *eh = &s->complete;
394	struct list_head *l;
395	struct dm_snap_exception *e = NULL;
396
397	l = &eh->table[exception_hash(eh, new_e->old_chunk)];
398
399	/* Add immediately if this table doesn't support consecutive chunks */
400	if (!eh->hash_shift)
401		goto out;
402
403	/* List is ordered by old_chunk */
404	list_for_each_entry_reverse(e, l, hash_list) {
405		/* Insert after an existing chunk? */
406		if (new_e->old_chunk == (e->old_chunk +
407					 dm_consecutive_chunk_count(e) + 1) &&
408		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
409					 dm_consecutive_chunk_count(e) + 1)) {
410			dm_consecutive_chunk_count_inc(e);
411			free_exception(new_e);
412			return;
413		}
414
415		/* Insert before an existing chunk? */
416		if (new_e->old_chunk == (e->old_chunk - 1) &&
417		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
418			dm_consecutive_chunk_count_inc(e);
419			e->old_chunk--;
420			e->new_chunk--;
421			free_exception(new_e);
422			return;
423		}
424
425		if (new_e->old_chunk > e->old_chunk)
426			break;
427	}
428
429out:
430	list_add(&new_e->hash_list, e ? &e->hash_list : l);
431}
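/*
 * Example of the merging above (illustrative numbers): if the table holds
 * old chunks 10-12 mapped to new chunks 20-22 (consecutive count 2), then
 * adding old 13 -> new 23 just bumps the count, while adding old 9 ->
 * new 19 bumps the count and slides the run's start down by one.
 */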
432
433/*
434 * Callback used by the exception stores to load exceptions when
435 * initialising.
436 */
437static int dm_add_exception(void *context, chunk_t old, chunk_t new)
438{
439	struct dm_snapshot *s = context;
440	struct dm_snap_exception *e;
441
442	e = alloc_exception();
443	if (!e)
444		return -ENOMEM;
445
446	e->old_chunk = old;
447
448	/* Consecutive_count is implicitly initialised to zero */
449	e->new_chunk = new;
450
451	insert_completed_exception(s, e);
452
453	return 0;
454}
455
456/*
457 * Hard-coded cap on the number of exception hash table buckets.
458 */
459static int calc_max_buckets(void)
460{
461	/* use a fixed size of 2MB */
462	unsigned long mem = 2 * 1024 * 1024;
463	mem /= sizeof(struct list_head);
464
465	return mem;
466}
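/*
 * Illustrative arithmetic: on a 64-bit build where a struct list_head is
 * 16 bytes, this allows at most 2MB / 16 = 131072 buckets.
 */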
467
468/*
469 * Allocate room for a suitable hash table.
470 */
471static int init_hash_tables(struct dm_snapshot *s)
472{
473	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
474
475	/*
476	 * Calculate based on the size of the original volume or
477	 * the COW volume...
478	 */
479	cow_dev_size = get_dev_size(s->cow->bdev);
480	origin_dev_size = get_dev_size(s->origin->bdev);
481	max_buckets = calc_max_buckets();
482
483	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
484	hash_size = min(hash_size, max_buckets);
485
486	hash_size = rounddown_pow_of_two(hash_size);
487	if (init_exception_table(&s->complete, hash_size,
488				 DM_CHUNK_CONSECUTIVE_BITS))
489		return -ENOMEM;
490
491	/*
492	 * Allocate hash table for in-flight exceptions
493	 * Make this smaller than the real hash table
494	 */
495	hash_size >>= 3;
496	if (hash_size < 64)
497		hash_size = 64;
498
499	if (init_exception_table(&s->pending, hash_size, 0)) {
500		exit_exception_table(&s->complete, exception_cache);
501		return -ENOMEM;
502	}
503
504	return 0;
505}
506
507/*
508 * Round a number up to the nearest 'size' boundary.  size must
509 * be a power of 2.
510 */
511static ulong round_up(ulong n, ulong size)
512{
513	size--;
514	return (n + size) & ~size;
515}
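/*
 * Examples: round_up(5, 4) == 8 and round_up(8, 4) == 8.  In
 * set_chunk_size() below, with 4KB pages a requested chunk size of
 * 5 sectors is rounded up to PAGE_SIZE >> 9 == 8 sectors.
 */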
516
517static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
518			  char **error)
519{
520	unsigned long chunk_size;
521	char *value;
522
523	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
524	if (*chunk_size_arg == '\0' || *value != '\0') {
525		*error = "Invalid chunk size";
526		return -EINVAL;
527	}
528
529	if (!chunk_size) {
530		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
531		return 0;
532	}
533
534	/*
535	 * Chunk size must be a multiple of the page size.  Silently
536	 * round up if it's not.
537	 */
538	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
539
540	/* Check chunk_size is a power of 2 */
541	if (!is_power_of_2(chunk_size)) {
542		*error = "Chunk size is not a power of 2";
543		return -EINVAL;
544	}
545
546	/* Validate the chunk size against the device block size */
547	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
548		*error = "Chunk size is not a multiple of device blocksize";
549		return -EINVAL;
550	}
551
552	s->chunk_size = chunk_size;
553	s->chunk_mask = chunk_size - 1;
554	s->chunk_shift = ffs(chunk_size) - 1;
555
556	return 0;
557}
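/*
 * For instance (illustrative), a chunk_size of 16 sectors (8KB) yields
 * chunk_mask == 15 and chunk_shift == 4 (ffs(16) - 1).
 */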
558
559/*
560 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
561 */
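/*
 * Example table line (device names illustrative), loaded with e.g.
 *   echo "0 2097152 snapshot /dev/vg0/base /dev/vg0/base-cow P 16" | dmsetup create snap
 * i.e. a persistent snapshot of the first 1GB of /dev/vg0/base using
 * 16-sector (8KB) chunks.
 */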
562static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
563{
564	struct dm_snapshot *s;
565	int i;
566	int r = -EINVAL;
567	char persistent;
568	char *origin_path;
569	char *cow_path;
570
571	if (argc != 4) {
572		ti->error = "requires exactly 4 arguments";
573		r = -EINVAL;
574		goto bad1;
575	}
576
577	origin_path = argv[0];
578	cow_path = argv[1];
579	persistent = toupper(*argv[2]);
580
581	if (persistent != 'P' && persistent != 'N') {
582		ti->error = "Persistent flag is not P or N";
583		r = -EINVAL;
584		goto bad1;
585	}
586
587	s = kmalloc(sizeof(*s), GFP_KERNEL);
588	if (s == NULL) {
589		ti->error = "Cannot allocate snapshot context private "
590		    "structure";
591		r = -ENOMEM;
592		goto bad1;
593	}
594
595	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
596	if (r) {
597		ti->error = "Cannot get origin device";
598		goto bad2;
599	}
600
601	r = dm_get_device(ti, cow_path, 0, 0,
602			  FMODE_READ | FMODE_WRITE, &s->cow);
603	if (r) {
604		dm_put_device(ti, s->origin);
605		ti->error = "Cannot get COW device";
606		goto bad2;
607	}
608
609	r = set_chunk_size(s, argv[3], &ti->error);
610	if (r)
611		goto bad3;
612
613	s->type = persistent;
614
615	s->valid = 1;
616	s->active = 0;
617	atomic_set(&s->pending_exceptions_count, 0);
618	init_rwsem(&s->lock);
619	spin_lock_init(&s->pe_lock);
620	s->ti = ti;
621
622	/* Allocate hash table for COW data */
623	if (init_hash_tables(s)) {
624		ti->error = "Unable to allocate hash table space";
625		r = -ENOMEM;
626		goto bad3;
627	}
628
629	s->store.snap = s;
630
631	if (persistent == 'P')
632		r = dm_create_persistent(&s->store);
633	else
634		r = dm_create_transient(&s->store);
635
636	if (r) {
637		ti->error = "Couldn't create exception store";
638		r = -EINVAL;
639		goto bad4;
640	}
641
642	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
643	if (r) {
644		ti->error = "Could not create kcopyd client";
645		goto bad5;
646	}
647
648	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
649	if (!s->pending_pool) {
650		ti->error = "Could not allocate mempool for pending exceptions";
651		goto bad6;
652	}
653
654	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
655							 tracked_chunk_cache);
656	if (!s->tracked_chunk_pool) {
657		ti->error = "Could not allocate tracked_chunk mempool for "
658			    "tracking reads";
659		goto bad_tracked_chunk_pool;
660	}
661
662	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
663		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
664
665	spin_lock_init(&s->tracked_chunk_lock);
666
667	/* Metadata must only be loaded into one table at once */
668	r = s->store.read_metadata(&s->store, dm_add_exception, (void *)s);
669	if (r < 0) {
670		ti->error = "Failed to read snapshot metadata";
671		goto bad_load_and_register;
672	} else if (r > 0) {
673		s->valid = 0;
674		DMWARN("Snapshot is marked invalid.");
675	}
676
677	bio_list_init(&s->queued_bios);
678	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
679
680	/* Add snapshot to the list of snapshots for this origin */
681	/* Exceptions aren't triggered till snapshot_resume() is called */
682	if (register_snapshot(s)) {
683		r = -EINVAL;
684		ti->error = "Cannot register snapshot origin";
685		goto bad_load_and_register;
686	}
687
688	ti->private = s;
689	ti->split_io = s->chunk_size;
690
691	return 0;
692
693 bad_load_and_register:
694	mempool_destroy(s->tracked_chunk_pool);
695
696 bad_tracked_chunk_pool:
697	mempool_destroy(s->pending_pool);
698
699 bad6:
700	dm_kcopyd_client_destroy(s->kcopyd_client);
701
702 bad5:
703	s->store.destroy(&s->store);
704
705 bad4:
706	exit_exception_table(&s->pending, pending_cache);
707	exit_exception_table(&s->complete, exception_cache);
708
709 bad3:
710	dm_put_device(ti, s->cow);
711	dm_put_device(ti, s->origin);
712
713 bad2:
714	kfree(s);
715
716 bad1:
717	return r;
718}
719
720static void __free_exceptions(struct dm_snapshot *s)
721{
722	dm_kcopyd_client_destroy(s->kcopyd_client);
723	s->kcopyd_client = NULL;
724
725	exit_exception_table(&s->pending, pending_cache);
726	exit_exception_table(&s->complete, exception_cache);
727
728	s->store.destroy(&s->store);
729}
730
731static void snapshot_dtr(struct dm_target *ti)
732{
733#ifdef CONFIG_DM_DEBUG
734	int i;
735#endif
736	struct dm_snapshot *s = ti->private;
737
738	flush_workqueue(ksnapd);
739
740	/* Prevent further origin writes from using this snapshot. */
741	/* After this returns there can be no new kcopyd jobs. */
742	unregister_snapshot(s);
743
744	while (atomic_read(&s->pending_exceptions_count))
745		msleep(1);
746	/*
747	 * Ensure instructions in mempool_destroy aren't reordered
748	 * before atomic_read.
749	 */
750	smp_mb();
751
752#ifdef CONFIG_DM_DEBUG
753	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
754		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
755#endif
756
757	mempool_destroy(s->tracked_chunk_pool);
758
759	__free_exceptions(s);
760
761	mempool_destroy(s->pending_pool);
762
763	dm_put_device(ti, s->origin);
764	dm_put_device(ti, s->cow);
765
766	kfree(s);
767}
768
769/*
770 * Flush a list of buffers.
771 */
772static void flush_bios(struct bio *bio)
773{
774	struct bio *n;
775
776	while (bio) {
777		n = bio->bi_next;
778		bio->bi_next = NULL;
779		generic_make_request(bio);
780		bio = n;
781	}
782}
783
784static void flush_queued_bios(struct work_struct *work)
785{
786	struct dm_snapshot *s =
787		container_of(work, struct dm_snapshot, queued_bios_work);
788	struct bio *queued_bios;
789	unsigned long flags;
790
791	spin_lock_irqsave(&s->pe_lock, flags);
792	queued_bios = bio_list_get(&s->queued_bios);
793	spin_unlock_irqrestore(&s->pe_lock, flags);
794
795	flush_bios(queued_bios);
796}
797
798/*
799 * Error a list of buffers.
800 */
801static void error_bios(struct bio *bio)
802{
803	struct bio *n;
804
805	while (bio) {
806		n = bio->bi_next;
807		bio->bi_next = NULL;
808		bio_io_error(bio);
809		bio = n;
810	}
811}
812
813static void __invalidate_snapshot(struct dm_snapshot *s, int err)
814{
815	if (!s->valid)
816		return;
817
818	if (err == -EIO)
819		DMERR("Invalidating snapshot: Error reading/writing.");
820	else if (err == -ENOMEM)
821		DMERR("Invalidating snapshot: Unable to allocate exception.");
822
823	if (s->store.drop_snapshot)
824		s->store.drop_snapshot(&s->store);
825
826	s->valid = 0;
827
828	dm_table_event(s->ti->table);
829}
830
831static void get_pending_exception(struct dm_snap_pending_exception *pe)
832{
833	atomic_inc(&pe->ref_count);
834}
835
836static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
837{
838	struct dm_snap_pending_exception *primary_pe;
839	struct bio *origin_bios = NULL;
840
841	primary_pe = pe->primary_pe;
842
843	/*
844	 * If this pe is involved in a write to the origin and
845	 * it is the last sibling to complete then release
846	 * the bios for the original write to the origin.
847	 */
848	if (primary_pe &&
849	    atomic_dec_and_test(&primary_pe->ref_count)) {
850		origin_bios = bio_list_get(&primary_pe->origin_bios);
851		free_pending_exception(primary_pe);
852	}
853
854	/*
855	 * Free the pe if it's not linked to an origin write or if
856	 * it's not itself a primary pe.
857	 */
858	if (!primary_pe || primary_pe != pe)
859		free_pending_exception(pe);
860
861	return origin_bios;
862}
863
864static void pending_complete(struct dm_snap_pending_exception *pe, int success)
865{
866	struct dm_snap_exception *e;
867	struct dm_snapshot *s = pe->snap;
868	struct bio *origin_bios = NULL;
869	struct bio *snapshot_bios = NULL;
870	int error = 0;
871
872	if (!success) {
873		/* Read/write error - snapshot is unusable */
874		down_write(&s->lock);
875		__invalidate_snapshot(s, -EIO);
876		error = 1;
877		goto out;
878	}
879
880	e = alloc_exception();
881	if (!e) {
882		down_write(&s->lock);
883		__invalidate_snapshot(s, -ENOMEM);
884		error = 1;
885		goto out;
886	}
887	*e = pe->e;
888
889	down_write(&s->lock);
890	if (!s->valid) {
891		free_exception(e);
892		error = 1;
893		goto out;
894	}
895
896	/*
897	 * Check for conflicting reads. This is extremely improbable,
898	 * so msleep(1) is sufficient and there is no need for a wait queue.
899	 */
900	while (__chunk_is_tracked(s, pe->e.old_chunk))
901		msleep(1);
902
903	/*
904	 * Add a proper exception, and remove the
905	 * in-flight exception from the list.
906	 */
907	insert_completed_exception(s, e);
908
909 out:
910	remove_exception(&pe->e);
911	snapshot_bios = bio_list_get(&pe->snapshot_bios);
912	origin_bios = put_pending_exception(pe);
913
914	up_write(&s->lock);
915
916	/* Submit any pending write bios */
917	if (error)
918		error_bios(snapshot_bios);
919	else
920		flush_bios(snapshot_bios);
921
922	flush_bios(origin_bios);
923}
924
925static void commit_callback(void *context, int success)
926{
927	struct dm_snap_pending_exception *pe = context;
928
929	pending_complete(pe, success);
930}
931
932/*
933 * Called when the copy I/O has finished.  kcopyd actually runs
934 * this code so don't block.
935 */
936static void copy_callback(int read_err, unsigned long write_err, void *context)
937{
938	struct dm_snap_pending_exception *pe = context;
939	struct dm_snapshot *s = pe->snap;
940
941	if (read_err || write_err)
942		pending_complete(pe, 0);
943
944	else
945		/* Update the metadata if we are persistent */
946		s->store.commit_exception(&s->store, &pe->e, commit_callback,
947					  pe);
948}
949
950/*
951 * Dispatches the copy operation to kcopyd.
952 */
953static void start_copy(struct dm_snap_pending_exception *pe)
954{
955	struct dm_snapshot *s = pe->snap;
956	struct dm_io_region src, dest;
957	struct block_device *bdev = s->origin->bdev;
958	sector_t dev_size;
959
960	dev_size = get_dev_size(bdev);
961
962	src.bdev = bdev;
963	src.sector = chunk_to_sector(s, pe->e.old_chunk);
964	src.count = min(s->chunk_size, dev_size - src.sector);
965
966	dest.bdev = s->cow->bdev;
967	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
968	dest.count = src.count;
969
970	/* Hand over to kcopyd */
971	dm_kcopyd_copy(s->kcopyd_client,
972		    &src, 1, &dest, 0, copy_callback, pe);
973}
974
975static struct dm_snap_pending_exception *
976__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
977{
978	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);
979
980	if (!e)
981		return NULL;
982
983	return container_of(e, struct dm_snap_pending_exception, e);
984}
985
986/*
987 * Looks to see if this snapshot already has a pending exception
988 * for this chunk, otherwise it allocates a new one and inserts
989 * it into the pending table.
990 *
991 * NOTE: a write lock must be held on snap->lock before calling
992 * this.
993 */
994static struct dm_snap_pending_exception *
995__find_pending_exception(struct dm_snapshot *s,
996			 struct dm_snap_pending_exception *pe, chunk_t chunk)
997{
998	struct dm_snap_pending_exception *pe2;
999
1000	pe2 = __lookup_pending_exception(s, chunk);
1001	if (pe2) {
1002		free_pending_exception(pe);
1003		return pe2;
1004	}
1005
1006	pe->e.old_chunk = chunk;
1007	bio_list_init(&pe->origin_bios);
1008	bio_list_init(&pe->snapshot_bios);
1009	pe->primary_pe = NULL;
1010	atomic_set(&pe->ref_count, 0);
1011	pe->started = 0;
1012
1013	if (s->store.prepare_exception(&s->store, &pe->e)) {
1014		free_pending_exception(pe);
1015		return NULL;
1016	}
1017
1018	get_pending_exception(pe);
1019	insert_exception(&s->pending, &pe->e);
1020
1021	return pe;
1022}
1023
1024static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
1025			    struct bio *bio, chunk_t chunk)
1026{
1027	bio->bi_bdev = s->cow->bdev;
1028	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
1029			 (chunk - e->old_chunk)) +
1030			 (bio->bi_sector & s->chunk_mask);
1031}
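/*
 * Remapping example (illustrative, chunk_size == 16 sectors): a bio for
 * sector 37 is chunk 2, offset 5; if chunk 2 was copied to COW chunk 7,
 * the bio is redirected to sector 7 * 16 + 5 = 117 on the COW device.
 */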
1032
1033static int snapshot_map(struct dm_target *ti, struct bio *bio,
1034			union map_info *map_context)
1035{
1036	struct dm_snap_exception *e;
1037	struct dm_snapshot *s = ti->private;
1038	int r = DM_MAPIO_REMAPPED;
1039	chunk_t chunk;
1040	struct dm_snap_pending_exception *pe = NULL;
1041
1042	chunk = sector_to_chunk(s, bio->bi_sector);
1043
1044	/* Full snapshots are not usable */
1045	/* To get here the table must be live so s->active is always set. */
1046	if (!s->valid)
1047		return -EIO;
1048
1049	/* FIXME: should only take write lock if we need
1050	 * to copy an exception */
1051	down_write(&s->lock);
1052
1053	if (!s->valid) {
1054		r = -EIO;
1055		goto out_unlock;
1056	}
1057
1058	/* If the block is already remapped - use that, else remap it */
1059	e = lookup_exception(&s->complete, chunk);
1060	if (e) {
1061		remap_exception(s, e, bio, chunk);
1062		goto out_unlock;
1063	}
1064
1065	/*
1066	 * Write to snapshot - higher level takes care of RW/RO
1067	 * flags so we should only get this if we are
1068	 * writeable.
1069	 */
1070	if (bio_rw(bio) == WRITE) {
1071		pe = __lookup_pending_exception(s, chunk);
1072		if (!pe) {
1073			up_write(&s->lock);
1074			pe = alloc_pending_exception(s);
1075			down_write(&s->lock);
1076
1077			if (!s->valid) {
1078				free_pending_exception(pe);
1079				r = -EIO;
1080				goto out_unlock;
1081			}
1082
1083			e = lookup_exception(&s->complete, chunk);
1084			if (e) {
1085				free_pending_exception(pe);
1086				remap_exception(s, e, bio, chunk);
1087				goto out_unlock;
1088			}
1089
1090			pe = __find_pending_exception(s, pe, chunk);
1091			if (!pe) {
1092				__invalidate_snapshot(s, -ENOMEM);
1093				r = -EIO;
1094				goto out_unlock;
1095			}
1096		}
1097
1098		remap_exception(s, &pe->e, bio, chunk);
1099		bio_list_add(&pe->snapshot_bios, bio);
1100
1101		r = DM_MAPIO_SUBMITTED;
1102
1103		if (!pe->started) {
1104			/* this is protected by snap->lock */
1105			pe->started = 1;
1106			up_write(&s->lock);
1107			start_copy(pe);
1108			goto out;
1109		}
1110	} else {
1111		bio->bi_bdev = s->origin->bdev;
1112		map_context->ptr = track_chunk(s, chunk);
1113	}
1114
1115 out_unlock:
1116	up_write(&s->lock);
1117 out:
1118	return r;
1119}
1120
1121static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1122			   int error, union map_info *map_context)
1123{
1124	struct dm_snapshot *s = ti->private;
1125	struct dm_snap_tracked_chunk *c = map_context->ptr;
1126
1127	if (c)
1128		stop_tracking_chunk(s, c);
1129
1130	return 0;
1131}
1132
1133static void snapshot_resume(struct dm_target *ti)
1134{
1135	struct dm_snapshot *s = ti->private;
1136
1137	down_write(&s->lock);
1138	s->active = 1;
1139	up_write(&s->lock);
1140}
1141
1142static int snapshot_status(struct dm_target *ti, status_type_t type,
1143			   char *result, unsigned int maxlen)
1144{
1145	struct dm_snapshot *snap = ti->private;
1146
1147	switch (type) {
1148	case STATUSTYPE_INFO:
1149		if (!snap->valid)
1150			snprintf(result, maxlen, "Invalid");
1151		else {
1152			if (snap->store.fraction_full) {
1153				sector_t numerator, denominator;
1154				snap->store.fraction_full(&snap->store,
1155							  &numerator,
1156							  &denominator);
1157				snprintf(result, maxlen, "%llu/%llu",
1158					(unsigned long long)numerator,
1159					(unsigned long long)denominator);
1160			}
1161			else
1162				snprintf(result, maxlen, "Unknown");
1163		}
1164		break;
1165
1166	case STATUSTYPE_TABLE:
1167		/*
1168		 * Emit the table line: the origin and COW device names,
1169		 * the persistence flag ('P' or 'N') and the chunk size
1170		 * in sectors.
1171		 */
1172		snprintf(result, maxlen, "%s %s %c %llu",
1173			 snap->origin->name, snap->cow->name,
1174			 snap->type,
1175			 (unsigned long long)snap->chunk_size);
1176		break;
1177	}
1178
1179	return 0;
1180}
1181
1182/*-----------------------------------------------------------------
1183 * Origin methods
1184 *---------------------------------------------------------------*/
1185static int __origin_write(struct list_head *snapshots, struct bio *bio)
1186{
1187	int r = DM_MAPIO_REMAPPED, first = 0;
1188	struct dm_snapshot *snap;
1189	struct dm_snap_exception *e;
1190	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
1191	chunk_t chunk;
1192	LIST_HEAD(pe_queue);
1193
1194	/* Do all the snapshots on this origin */
1195	list_for_each_entry (snap, snapshots, list) {
1196
1197		down_write(&snap->lock);
1198
1199		/* Only deal with valid and active snapshots */
1200		if (!snap->valid || !snap->active)
1201			goto next_snapshot;
1202
1203		/* Nothing to do if writing beyond end of snapshot */
1204		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
1205			goto next_snapshot;
1206
1207		/*
1208		 * Remember, different snapshots can have
1209		 * different chunk sizes.
1210		 */
1211		chunk = sector_to_chunk(snap, bio->bi_sector);
1212
1213		/*
1214		 * Check exception table to see if block
1215		 * is already remapped in this snapshot
1216		 * and trigger an exception if not.
1217		 *
1218		 * ref_count is initialised to 1 so pending_complete()
1219		 * won't destroy the primary_pe while we're inside this loop.
1220		 */
1221		e = lookup_exception(&snap->complete, chunk);
1222		if (e)
1223			goto next_snapshot;
1224
1225		pe = __lookup_pending_exception(snap, chunk);
1226		if (!pe) {
1227			up_write(&snap->lock);
1228			pe = alloc_pending_exception(snap);
1229			down_write(&snap->lock);
1230
1231			if (!snap->valid) {
1232				free_pending_exception(pe);
1233				goto next_snapshot;
1234			}
1235
1236			e = lookup_exception(&snap->complete, chunk);
1237			if (e) {
1238				free_pending_exception(pe);
1239				goto next_snapshot;
1240			}
1241
1242			pe = __find_pending_exception(snap, pe, chunk);
1243			if (!pe) {
1244				__invalidate_snapshot(snap, -ENOMEM);
1245				goto next_snapshot;
1246			}
1247		}
1248
1249		if (!primary_pe) {
1250			/*
1251			 * Either every pe here has the same
1252			 * primary_pe or none has one yet.
1253			 */
1254			if (pe->primary_pe)
1255				primary_pe = pe->primary_pe;
1256			else {
1257				primary_pe = pe;
1258				first = 1;
1259			}
1260
1261			bio_list_add(&primary_pe->origin_bios, bio);
1262
1263			r = DM_MAPIO_SUBMITTED;
1264		}
1265
1266		if (!pe->primary_pe) {
1267			pe->primary_pe = primary_pe;
1268			get_pending_exception(primary_pe);
1269		}
1270
1271		if (!pe->started) {
1272			pe->started = 1;
1273			list_add_tail(&pe->list, &pe_queue);
1274		}
1275
1276 next_snapshot:
1277		up_write(&snap->lock);
1278	}
1279
1280	if (!primary_pe)
1281		return r;
1282
1283	/*
1284	 * If this is the first time we're processing this chunk and
1285	 * ref_count is now 1 it means all the pending exceptions
1286	 * got completed while we were in the loop above, so it falls to
1287	 * us here to remove the primary_pe and submit any origin_bios.
1288	 */
1289
1290	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
1291		flush_bios(bio_list_get(&primary_pe->origin_bios));
1292		free_pending_exception(primary_pe);
1293		/* If we got here, pe_queue is necessarily empty. */
1294		return r;
1295	}
1296
1297	/*
1298	 * Now that we have a complete pe list we can start the copying.
1299	 */
1300	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
1301		start_copy(pe);
1302
1303	return r;
1304}
1305
1306/*
1307 * Called on a write from the origin driver.
1308 */
1309static int do_origin(struct dm_dev *origin, struct bio *bio)
1310{
1311	struct origin *o;
1312	int r = DM_MAPIO_REMAPPED;
1313
1314	down_read(&_origins_lock);
1315	o = __lookup_origin(origin->bdev);
1316	if (o)
1317		r = __origin_write(&o->snapshots, bio);
1318	up_read(&_origins_lock);
1319
1320	return r;
1321}
1322
1323/*
1324 * Origin: maps a linear range of a device, with hooks for snapshotting.
1325 */
1326
1327/*
1328 * Construct an origin mapping: <dev_path>
1329 * The context for an origin is merely a 'struct dm_dev *'
1330 * pointing to the real device.
1331 */
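/*
 * Example table line (illustrative): "0 2097152 snapshot-origin /dev/vg0/base"
 * maps the first 1GB of /dev/vg0/base and, on writes, triggers exceptions
 * in every snapshot registered against that origin.
 */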
1332static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1333{
1334	int r;
1335	struct dm_dev *dev;
1336
1337	if (argc != 1) {
1338		ti->error = "origin: incorrect number of arguments";
1339		return -EINVAL;
1340	}
1341
1342	r = dm_get_device(ti, argv[0], 0, ti->len,
1343			  dm_table_get_mode(ti->table), &dev);
1344	if (r) {
1345		ti->error = "Cannot get target device";
1346		return r;
1347	}
1348
1349	ti->private = dev;
1350	return 0;
1351}
1352
1353static void origin_dtr(struct dm_target *ti)
1354{
1355	struct dm_dev *dev = ti->private;
1356	dm_put_device(ti, dev);
1357}
1358
1359static int origin_map(struct dm_target *ti, struct bio *bio,
1360		      union map_info *map_context)
1361{
1362	struct dm_dev *dev = ti->private;
1363	bio->bi_bdev = dev->bdev;
1364
1365	/* Only tell snapshots if this is a write */
1366	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
1367}
1368
1369#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
1370
1371/*
1372 * Set the target "split_io" field to the minimum of all the snapshots'
1373 * chunk sizes.
1374 */
1375static void origin_resume(struct dm_target *ti)
1376{
1377	struct dm_dev *dev = ti->private;
1378	struct dm_snapshot *snap;
1379	struct origin *o;
1380	chunk_t chunk_size = 0;
1381
1382	down_read(&_origins_lock);
1383	o = __lookup_origin(dev->bdev);
1384	if (o)
1385		list_for_each_entry (snap, &o->snapshots, list)
1386			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
1387	up_read(&_origins_lock);
1388
1389	ti->split_io = chunk_size;
1390}
1391
1392static int origin_status(struct dm_target *ti, status_type_t type, char *result,
1393			 unsigned int maxlen)
1394{
1395	struct dm_dev *dev = ti->private;
1396
1397	switch (type) {
1398	case STATUSTYPE_INFO:
1399		result[0] = '\0';
1400		break;
1401
1402	case STATUSTYPE_TABLE:
1403		snprintf(result, maxlen, "%s", dev->name);
1404		break;
1405	}
1406
1407	return 0;
1408}
1409
1410static struct target_type origin_target = {
1411	.name    = "snapshot-origin",
1412	.version = {1, 6, 0},
1413	.module  = THIS_MODULE,
1414	.ctr     = origin_ctr,
1415	.dtr     = origin_dtr,
1416	.map     = origin_map,
1417	.resume  = origin_resume,
1418	.status  = origin_status,
1419};
1420
1421static struct target_type snapshot_target = {
1422	.name    = "snapshot",
1423	.version = {1, 6, 0},
1424	.module  = THIS_MODULE,
1425	.ctr     = snapshot_ctr,
1426	.dtr     = snapshot_dtr,
1427	.map     = snapshot_map,
1428	.end_io  = snapshot_end_io,
1429	.resume  = snapshot_resume,
1430	.status  = snapshot_status,
1431};
1432
1433static int __init dm_snapshot_init(void)
1434{
1435	int r;
1436
1437	r = dm_exception_store_init();
1438	if (r) {
1439		DMERR("Failed to initialize exception stores");
1440		return r;
1441	}
1442
1443	r = dm_register_target(&snapshot_target);
1444	if (r) {
1445		DMERR("snapshot target register failed %d", r);
1446		return r;
1447	}
1448
1449	r = dm_register_target(&origin_target);
1450	if (r < 0) {
1451		DMERR("Origin target register failed %d", r);
1452		goto bad1;
1453	}
1454
1455	r = init_origin_hash();
1456	if (r) {
1457		DMERR("init_origin_hash failed.");
1458		goto bad2;
1459	}
1460
1461	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
1462	if (!exception_cache) {
1463		DMERR("Couldn't create exception cache.");
1464		r = -ENOMEM;
1465		goto bad3;
1466	}
1467
1468	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
1469	if (!pending_cache) {
1470		DMERR("Couldn't create pending cache.");
1471		r = -ENOMEM;
1472		goto bad4;
1473	}
1474
1475	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
1476	if (!tracked_chunk_cache) {
1477		DMERR("Couldn't create cache to track chunks in use.");
1478		r = -ENOMEM;
1479		goto bad5;
1480	}
1481
1482	ksnapd = create_singlethread_workqueue("ksnapd");
1483	if (!ksnapd) {
1484		DMERR("Failed to create ksnapd workqueue.");
1485		r = -ENOMEM;
1486		goto bad_pending_pool;
1487	}
1488
1489	return 0;
1490
1491bad_pending_pool:
1492	kmem_cache_destroy(tracked_chunk_cache);
1493bad5:
1494	kmem_cache_destroy(pending_cache);
1495bad4:
1496	kmem_cache_destroy(exception_cache);
1497bad3:
1498	exit_origin_hash();
1499bad2:
1500	dm_unregister_target(&origin_target);
1501bad1:
1502	dm_unregister_target(&snapshot_target);
1503	return r;
1504}
1505
1506static void __exit dm_snapshot_exit(void)
1507{
1508	destroy_workqueue(ksnapd);
1509
1510	dm_unregister_target(&snapshot_target);
1511	dm_unregister_target(&origin_target);
1512
1513	exit_origin_hash();
1514	kmem_cache_destroy(pending_cache);
1515	kmem_cache_destroy(exception_cache);
1516	kmem_cache_destroy(tracked_chunk_cache);
1517
1518	dm_exception_store_exit();
1519}
1520
1521/* Module hooks */
1522module_init(dm_snapshot_init);
1523module_exit(dm_snapshot_exit);
1524
1525MODULE_DESCRIPTION(DM_NAME " snapshot target");
1526MODULE_AUTHOR("Joe Thornber");
1527MODULE_LICENSE("GPL");
1528