dm-snap.c revision a765e20eeb423d0fa6a02ffab51141e53bbd93cb
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Slab caches for completed and pending exceptions, and the mempool
 * used to allocate pending exceptions.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * the lock that protects it.
 */
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

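/*
 * Allocate the origin hash table buckets and initialise the rw_semaphore
 * that protects them.  Called once from dm_snapshot_init().
 */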
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

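/*
 * Hash an origin by the low bits of its device number; this indexes
 * the _origins bucket array (ORIGIN_HASH_SIZE entries).
 */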
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o;
	struct block_device *bdev = snap->origin->bdev;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (!o) {
		/* New origin */
		o = kmalloc(sizeof(*o), GFP_KERNEL);
		if (!o) {
			up_write(&_origins_lock);
			return -ENOMEM;
		}

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

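/*
 * Chunk number to bucket index: the low hash_shift bits are dropped so
 * that runs of consecutive chunks share a bucket (see the comment above
 * init_exception_table()).
 */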
static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

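/*
 * Allocate a completed-exception structure.  Try GFP_NOIO first and, if
 * that fails, fall back to GFP_ATOMIC, which may dip into the emergency
 * reserves, before giving up.
 */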
static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	mempool_free(pe, pending_pool);
}

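/*
 * Add a completed exception to the hash table, merging it into an
 * existing entry when the new chunk extends a run of consecutive chunks
 * immediately before or after it.  Each bucket list is kept ordered by
 * old_chunk.
 */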
static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

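/*
 * Parse and validate the chunk size argument (in 512-byte sectors).
 * Zero leaves chunk_size, chunk_mask and chunk_shift all unset; otherwise
 * the value is rounded up to a whole number of pages and must be a power
 * of 2 and a multiple of the COW device's hardware sector size.
 */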
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	s->last_percent = 0;
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad6;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad6;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	__free_exceptions(s);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

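/*
 * Work function run on the ksnapd workqueue: detach the bios queued on
 * the snapshot (under pe_lock) and submit them.
 */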
static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

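/*
 * Mark a snapshot unusable after an error: drop the exception store if
 * the backend supports it and raise a table event so userspace can
 * notice.  Callers must hold s->lock for writing.
 */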
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

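/*
 * Drop one reference on the primary pending exception for this chunk.
 * If this was the last reference, return the origin bios that were
 * waiting on it so the caller can submit them; the pending exceptions
 * themselves are freed here.
 */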
static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count))
		origin_bios = bio_list_get(&primary_pe->origin_bios);

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	/*
	 * Free the primary pe if nothing references it.
	 */
	if (primary_pe && !atomic_read(&primary_pe->ref_count))
		free_pending_exception(primary_pe);

	return origin_bios;
}

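/*
 * The chunk copy (and, for persistent snapshots, the metadata commit)
 * has finished.  On success move the exception from the pending table
 * to the complete table; on failure invalidate the snapshot.  Then
 * release the bios that were waiting on this chunk.
 */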
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception();
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->snap = s;
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

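/*
 * Redirect a bio to the COW device, at the chunk the exception maps to,
 * preserving the offset within the chunk.
 */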
static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
			 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}

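/*
 * Map a bio submitted to the snapshot device.  Reads of unremapped
 * chunks go to the origin; already-remapped chunks go to the COW
 * device; writes to unremapped chunks trigger (or join) a pending
 * exception and are held until the copy completes.
 */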
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception.  However I can't think of a
		 * situation where this is wrong - ejt.
		 */
		bio->bi_bdev = s->origin->bdev;

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

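/*
 * Mark the snapshot active.  Exceptions are not triggered by origin
 * writes until this has been called (see snapshot_ctr()).
 */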
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					(unsigned long long)numerator,
					(unsigned long long)denominator);
			}
			else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
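/*
 * A write is about to hit the origin.  For every valid, active snapshot
 * of this origin make sure the chunk has (or gets) a pending exception,
 * queue the bio on the primary pending exception and start the copies.
 * The bio is only released once all the copies have completed.
 */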
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	pending_pool = mempool_create_slab_pool(128, pending_cache);
	if (!pending_pool) {
		DMERR("Couldn't create pending pool.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad6;
	}

	return 0;

      bad6:
	mempool_destroy(pending_pool);
      bad5:
	kmem_cache_destroy(pending_cache);
      bad4:
	kmem_cache_destroy(exception_cache);
      bad3:
	exit_origin_hash();
      bad2:
	dm_unregister_target(&origin_target);
      bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	mempool_destroy(pending_pool);
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
1367