/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
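
/*
 * For illustration (values assumed, not used by the driver): with
 * DM_TRACKED_CHUNK_HASH_SIZE == 16,
 *
 *	DM_TRACKED_CHUNK_HASH(0x123) == 0x3
 *	DM_TRACKED_CHUNK_HASH(0x133) == 0x3	(same bucket)
 *
 * so a lookup only walks the in-flight reads whose chunk numbers
 * share their low four bits.
 */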

struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	atomic_t pending_exceptions_count;

	/* Protected by "lock" */
	sector_t exception_start_sequence;

	/* Protected by kcopyd single-threaded callback */
	sector_t exception_complete_sequence;

	/*
	 * A list of pending exceptions that completed out of order.
	 * Protected by kcopyd single-threaded callback.
	 */
	struct list_head out_of_order_list;

	mempool_t *pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;

	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *	=> don't load the target; abort.
	 *	   (We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
	return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
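
/*
 * Worked example (assumed values): a 16-sector (8KiB) chunk size
 * gives chunk_shift == 4, so chunk 5 starts at sector 5 << 4 == 80.
 */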

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;

	/* There was a copying error. */
	int copy_error;

	/* A sequence number used for in-order completion. */
	sector_t exception_sequence;

	struct list_head out_of_order_entry;

	/*
	 * For writing a complete chunk, bypassing the copy.
	 */
	struct bio *full_bio;
	bio_end_io_t *full_bio_end_io;
	void *full_bio_private;
};

/*
 * Slab caches for the completed and pending exception structures.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
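
/*
 * Grouping example (illustrative values): with hash_shift == 2,
 *
 *	exception_hash(et, 8)  == 2
 *	exception_hash(et, 11) == 2	(chunks 8-11 share a slot)
 *
 * so runs of consecutive chunks land in one bucket where they can be
 * found and coalesced cheaply.
 */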

static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, gfp);
	if (!e && gfp == GFP_NOIO)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
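
/*
 * Coalescing example (illustrative chunk numbers): suppose the table
 * holds an exception with old_chunk == 10, new_chunk == 50 and a
 * consecutive count of 2, i.e. old chunks 10-12 map to new chunks
 * 50-52.  Inserting old_chunk == 13 / new_chunk == 53 matches the
 * "insert after" test above: the count is bumped to 3 and the new
 * entry is freed instead of being linked in.
 */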

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception(GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}
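
/*
 * Back-of-the-envelope (assuming a 16-byte struct list_head on 64-bit):
 * 2MB / 16 bytes == 131072 bucket heads at most.
 */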

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	max_buckets = calc_max_buckets();

	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
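
/*
 * Trimming example (illustrative values): for a group with
 * old_chunk == 10 and a consecutive count of 3 (covering chunks
 * 10-13), merging chunk 13 just decrements the count, while merging
 * chunk 10 first advances old_chunk/new_chunk to 11.  Asking to merge
 * chunk 11 or 12 at this point would trip the "middle of a chunk
 * range" error above.
 */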

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = 1;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
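/*
 * For example (device names assumed, illustrative only), a persistent
 * snapshot with 16-sector chunks could be created with:
 *
 *	dmsetup create snap --table \
 *		"0 2097152 snapshot /dev/vg/base /dev/vg/cow P 16"
 */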
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used, num_flush_bios = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_bios = 2;
		origin_mode = FMODE_WRITE;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate private snapshot structure";
		r = -ENOMEM;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	s->exception_start_sequence = 0;
	s->exception_complete_sequence = 0;
	INIT_LIST_HEAD(&s->out_of_order_list);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(s->kcopyd_client)) {
		r = PTR_ERR(s->kcopyd_client);
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	ti->private = s;
	ti->num_flush_bios = num_flush_bios;
	ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}

	r = dm_set_target_max_io_len(ti, s->store->chunk_size);
	if (r)
		goto bad_read_metadata;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	dm_put_device(ti, s->origin);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Retry a list of origin bios: re-run each through do_origin() and
 * submit those that come back remapped.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	struct bio *full_bio = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception(GFP_NOIO);
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	full_bio = pe->full_bio;
	if (full_bio) {
		full_bio->bi_end_io = pe->full_bio_end_io;
		full_bio->bi_private = pe->full_bio_private;
		atomic_inc(&full_bio->bi_remaining);
	}
	free_pending_exception(pe);

	increment_pending_exceptions_done_count();

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error) {
		if (full_bio)
			bio_io_error(full_bio);
		error_bios(snapshot_bios);
	} else {
		if (full_bio)
			bio_endio(full_bio, 0);
		flush_bios(snapshot_bios);
	}

	retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

static void complete_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	if (unlikely(pe->copy_error))
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	pe->copy_error = read_err || write_err;

	if (pe->exception_sequence == s->exception_complete_sequence) {
		s->exception_complete_sequence++;
		complete_exception(pe);

		while (!list_empty(&s->out_of_order_list)) {
			pe = list_entry(s->out_of_order_list.next,
					struct dm_snap_pending_exception, out_of_order_entry);
			if (pe->exception_sequence != s->exception_complete_sequence)
				break;
			s->exception_complete_sequence++;
			list_del(&pe->out_of_order_entry);
			complete_exception(pe);
		}
	} else {
		struct list_head *lh;
		struct dm_snap_pending_exception *pe2;

		list_for_each_prev(lh, &s->out_of_order_list) {
			pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
			if (pe2->exception_sequence < pe->exception_sequence)
				break;
		}
		list_add(&pe->out_of_order_entry, lh);
	}
}
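
/*
 * Completion-ordering example (hypothetical sequence numbers): if
 * copies finish in the order 3, 1, 2, then 3 is parked on
 * out_of_order_list, 1 completes immediately and advances
 * exception_complete_sequence to 2, and when 2 completes it also
 * drains 3 from the list.
 */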

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio, int error)
{
	void *callback_data = bio->bi_private;

	dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
			   struct bio *bio)
{
	struct dm_snapshot *s = pe->snap;
	void *callback_data;

	pe->full_bio = bio;
	pe->full_bio_end_io = bio->bi_end_io;
	pe->full_bio_private = bio->bi_private;

	callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
						   copy_callback, pe);

	bio->bi_end_io = full_bio_end_io;
	bio->bi_private = callback_data;

	generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;
	pe->full_bio = NULL;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	pe->exception_sequence = s->exception_start_sequence++;

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}
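
/*
 * Remap example (illustrative values): with 16-sector chunks
 * (chunk_mask == 0xf), a bio at sector 85 lies in old chunk 5; if
 * that chunk is remapped to new chunk 9, the bio is redirected to
 * sector (9 << 4) + (85 & 0xf) == 149 on the COW device.
 */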

static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started &&
		    bio->bi_iter.bi_size ==
		    (s->store->chunk_size << SECTOR_SHIFT)) {
			pe->started = 1;
			up_write(&s->lock);
			start_full_bio(pe, bio);
			goto out;
		}

		bio_list_add(&pe->snapshot_bios, bio);

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		track_chunk(s, bio, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * Emit the table line: the origin and COW device names
		 * followed by the exception store arguments.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio is ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
1970static int __origin_write(struct list_head *snapshots, sector_t sector,
1971			  struct bio *bio)
1972{
1973	int r = DM_MAPIO_REMAPPED;
1974	struct dm_snapshot *snap;
1975	struct dm_exception *e;
1976	struct dm_snap_pending_exception *pe;
1977	struct dm_snap_pending_exception *pe_to_start_now = NULL;
1978	struct dm_snap_pending_exception *pe_to_start_last = NULL;
1979	chunk_t chunk;
1980
1981	/* Do all the snapshots on this origin */
1982	list_for_each_entry (snap, snapshots, list) {
1983		/*
1984		 * Don't make new exceptions in a merging snapshot
1985		 * because it has effectively been deleted
1986		 */
1987		if (dm_target_is_snapshot_merge(snap->ti))
1988			continue;
1989
1990		down_write(&snap->lock);
1991
1992		/* Only deal with valid and active snapshots */
1993		if (!snap->valid || !snap->active)
1994			goto next_snapshot;
1995
1996		/* Nothing to do if writing beyond end of snapshot */
1997		if (sector >= dm_table_get_size(snap->ti->table))
1998			goto next_snapshot;
1999
2000		/*
2001		 * Remember, different snapshots can have
2002		 * different chunk sizes.
2003		 */
2004		chunk = sector_to_chunk(snap->store, sector);
2005
2006		/*
2007		 * Check exception table to see if block
2008		 * is already remapped in this snapshot
2009		 * and trigger an exception if not.
2010		 */
2011		e = dm_lookup_exception(&snap->complete, chunk);
2012		if (e)
2013			goto next_snapshot;
2014
2015		pe = __lookup_pending_exception(snap, chunk);
2016		if (!pe) {
2017			up_write(&snap->lock);
2018			pe = alloc_pending_exception(snap);
2019			down_write(&snap->lock);
2020
2021			if (!snap->valid) {
2022				free_pending_exception(pe);
2023				goto next_snapshot;
2024			}
2025
2026			e = dm_lookup_exception(&snap->complete, chunk);
2027			if (e) {
2028				free_pending_exception(pe);
2029				goto next_snapshot;
2030			}
2031
2032			pe = __find_pending_exception(snap, pe, chunk);
2033			if (!pe) {
2034				__invalidate_snapshot(snap, -ENOMEM);
2035				goto next_snapshot;
2036			}
2037		}
2038
2039		r = DM_MAPIO_SUBMITTED;
2040
2041		/*
2042		 * If an origin bio was supplied, queue it to wait for the
2043		 * completion of this exception, and start this one last,
2044		 * at the end of the function.
2045		 */
2046		if (bio) {
2047			bio_list_add(&pe->origin_bios, bio);
2048			bio = NULL;
2049
2050			if (!pe->started) {
2051				pe->started = 1;
2052				pe_to_start_last = pe;
2053			}
2054		}
2055
2056		if (!pe->started) {
2057			pe->started = 1;
2058			pe_to_start_now = pe;
2059		}
2060
2061next_snapshot:
2062		up_write(&snap->lock);
2063
2064		if (pe_to_start_now) {
2065			start_copy(pe_to_start_now);
2066			pe_to_start_now = NULL;
2067		}
2068	}
2069
2070	/*
2071	 * Submit the exception against which the bio is queued last,
2072	 * to give the other exceptions a head start.
2073	 */
2074	if (pe_to_start_last)
2075		start_copy(pe_to_start_last);
2076
2077	return r;
2078}
2079
2080/*
2081 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
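	/*
	 * The merging snapshot registered itself against this origin when
	 * it was resumed, so the lookup is not expected to fail here.
	 */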
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

struct dm_origin {
	struct dm_dev *dev;
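	/*
	 * Writes are split on this boundary: the minimum of all the
	 * snapshots' chunk sizes, in sectors (always a power of two).
	 */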
	unsigned split_boundary;
};

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is a 'struct dm_origin' holding a
 * reference to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_origin *o;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
	if (!o) {
		ti->error = "Cannot allocate private origin structure";
		r = -ENOMEM;
		goto bad_alloc;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
	if (r) {
		ti->error = "Cannot get target device";
		goto bad_open;
	}

	ti->private = o;
	ti->num_flush_bios = 1;

	return 0;

bad_open:
	kfree(o);
bad_alloc:
	return r;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	dm_put_device(ti, o->dev);
	kfree(o);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_origin *o = ti->private;
	unsigned available_sectors;

	bio->bi_bdev = o->dev->bdev;

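	/* Flushes carry no data, so they bypass snapshot handling */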
	if (unlikely(bio->bi_rw & REQ_FLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_rw(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

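	/*
	 * Split the bio so it never crosses a chunk boundary of any
	 * snapshot; split_boundary is a power of two, so the room left
	 * in the current chunk can be computed with a mask.
	 */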
	available_sectors = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);

	/* Only tell snapshots if this is a write */
	return do_origin(o->dev, bio);
}

/*
 * Set split_boundary to the minimum of all the snapshots' chunk sizes;
 * origin_map() uses it to split bios that would otherwise span a
 * chunk boundary.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_origin *o = ti->private;

	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_origin *o = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", o->dev->name);
		break;
	}
}

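/*
 * Defer bvec merging decisions to the underlying device's queue, so
 * the origin never builds a bio the real device would have to split.
 */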
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct dm_origin *o = ti->private;
	struct request_queue *q = bdev_get_queue(o->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = o->dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

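/* Report the one underlying device (used e.g. when stacking queue limits) */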
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_origin *o = ti->private;

	return fn(ti, o->dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 8, 1},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.merge   = origin_merge,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 12, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

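/*
 * The merge target shares the snapshot's ctr/dtr but uses its own map,
 * presuspend and resume hooks, so exceptions are copied back into the
 * origin rather than out of it.
 */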
static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 2, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	/*
	 * Register the targets last: a table load may arrive as soon as
	 * a target is visible, so every resource its ctr needs must
	 * already exist.
	 */
	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	return 0;

bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");