dm-io.c revision d87f4c14f27dc82d215108d8392a7d26687148a1
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
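
/*
 * Client lifecycle sketch (illustrative only; the page count below is
 * an arbitrary assumption, not a recommendation):
 *
 *	struct dm_io_client *ioc = dm_io_client_create(32);
 *
 *	if (IS_ERR(ioc))
 *		return PTR_ERR(ioc);
 *	...issue io through dm_io() with .client = ioc...
 *	dm_io_client_destroy(ioc);
 */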

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
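
/*
 * Worked example of the packing above, on a 64-bit build where
 * DM_IO_MAX_REGIONS == 64 and 'struct io' is 64-byte aligned:
 *
 *	io         = 0x...f40	(low 6 bits zero by alignment)
 *	region     = 5
 *	bi_private = 0x...f45	(io | region)
 *
 * Unpacking masks with -64UL (all bits set except the low 6) to
 * recover the pointer, and with 63 to recover the region number.
 */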

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
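
/*
 * Note: the value handed to the notify callback (and reported through
 * sync_io()'s *error_bits) is a bitmap with bit N set if region N
 * failed, which is why num_regions is capped at DM_IO_MAX_REGIONS
 * (BITS_PER_LONG).
 */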

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
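
/*
 * A dpages is a forward-only cursor over some source of pages.  The
 * consumption pattern, sketched from what do_region() does below:
 *
 *	struct page *page;
 *	unsigned long len;
 *	unsigned offset;
 *
 *	dp->get_page(dp, &page, &len, &offset);	peek at the current page
 *	...use up to 'len' bytes at 'offset' within 'page'...
 *	dp->next_page(dp);			advance the cursor
 */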

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
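
/*
 * Example of the offset arithmetic above, assuming PAGE_SIZE == 4096
 * and a vmalloc'd buffer starting at data == 0x...1200:
 *
 *	context_u = 0x1200 & 0xfff = 0x200
 *
 * so the first vm_get_page() yields offset 0x200 and len 0xe00, and
 * vm_next_page() advances context_ptr by 0xe00 to the next page
 * boundary, after which context_u stays 0 and whole pages are returned.
 */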

static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC | REQ_UNPLUG;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
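
/*
 * Note on the wait loop above: sync_io() parks in TASK_UNINTERRUPTIBLE
 * and relies on dec_count() calling wake_up_process() on io->sleeper
 * once the final bio completes; io_schedule() is used rather than
 * schedule() so the sleep is accounted as io wait.
 */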

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
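
/*
 * Usage sketch (illustrative only; 'md' and 'buf' are hypothetical
 * caller state, and only fields consumed by dp_init() and dm_io()
 * above are shown): a synchronous read of one region into a
 * vmalloc'd buffer.
 *
 *	struct dm_io_region where = {
 *		.bdev   = md->bdev,
 *		.sector = 0,
 *		.count  = 8,		(4KiB in 512-byte sectors)
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw       = READ,
 *		.mem.type    = DM_IO_VMA,
 *		.mem.ptr.vma = buf,
 *		.notify.fn   = NULL,	(NULL selects the sync_io() path)
 *		.client      = md->io_client,
 *	};
 *	unsigned long error_bits;
 *
 *	return dm_io(&req, 1, &where, &error_bits);
 */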

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}