dm-io.c revision 12fc0f49dc994d8d90dcf3df13f5b1ee5441288d
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

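/*
 * Per-client allocation state: a mempool of struct io for io tracking
 * and a bioset for the bios dm-io submits, so each client has its own
 * guaranteed reserve of memory.
 */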
struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
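 *
 * error_bits and eopnotsupp_bits record failures per region, count is
 * the number of outstanding bios plus one, and completion is signalled
 * either by waking 'sleeper' (sync io) or by calling 'callback' with
 * 'context' (async io).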
 */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
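
/*
 * Typical client lifecycle (illustrative sketch: error handling is
 * trimmed and the page count of 1 is an arbitrary example value):
 *
 *	struct dm_io_client *c = dm_io_client_create(1);
 *
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	... issue io through c with dm_io() ...
 *	dm_io_client_destroy(c);
 */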

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
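
/*
 * Worked example (illustrative, 64-bit kernel, DM_IO_MAX_REGIONS == 64):
 * a struct io at 0xffff880012345640 (64-byte aligned, low six bits
 * clear) doing io for region 5 stores 0xffff880012345645 in bi_private.
 * Retrieval masks with -64UL (i.e. ~63UL) to recover the pointer and
 * with 63 to recover the region number.
 */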

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
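/*
 * io->count starts at 1 (set in sync_io()/async_io()) so the io cannot
 * complete while dispatch_io() is still submitting bios; dispatch_io()
 * drops that extra reference once all regions have been dispatched.
 */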
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

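	/* Don't let a failed read leak stale buffer contents to the caller. */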
	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
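
/*
 * Consumers iterate roughly as do_region() below does:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	... consume up to len bytes at offset within page ...
 *	dp->next_page(dp);
 */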

/*
 * Functions for getting the pages from a list.
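 * The initial offset (context_u) applies only to the first page:
 * list_next_page() resets it to zero.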
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
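 * next_page simply steps to the adjacent bio_vec in the array.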
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA (a vmalloc()ed buffer:
 * vm_get_page() relies on vmalloc_to_page()).
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

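/*
 * bi_private still carries the io/region cookie when a bio is freed,
 * so recover the io to find the client bioset to return the bio to.
 */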
static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
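 * km_get_page() uses virt_to_page(), so this only works for linearly
 * mapped (lowmem) addresses, not for vmalloc()ed ones.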
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
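/*
 * Sizing example (illustrative, 4KiB pages, so 8 sectors per page): a
 * 24-sector (12KiB) region gives dm_sector_div_up(24, 8) == 3 bio_vecs,
 * further capped by bio_get_nr_vecs() for the target device.
 */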
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a write barrier and we
	 * need to send a zero-sized barrier.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

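	/*
	 * A sync caller is about to sleep on this io: mark the bios
	 * sync and unplug the queue so they are issued immediately.
	 */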
	if (sync)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing the
	 * "io_" variable from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

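	/* The device rejected a barrier (-EOPNOTSUPP): retry without it. */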
	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
		rw &= ~(1 << BIO_RW_BARRIER);
		goto retry;
	}

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNCIO and
 * BIO_RW_UNPLUG bits in io_req->bi_rw (as the sync path does).  If you fail
 * to do one of these, the IO will be submitted to the disk after
 * q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
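
/*
 * Minimal synchronous read sketch (illustrative only: "dev", "client"
 * and the vmalloc()ed "buf" are assumed to exist already):
 *
 *	struct dm_io_region where = {
 *		.bdev = dev->bdev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_VMA,
 *		.mem.ptr.vma = buf,
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 *
 * Leaving notify.fn zeroed selects the synchronous sync_io() path.
 */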

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}