dm-io.c revision dd0fc66fb33cd610bc1a5db8a5e232d34879b4d7
/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct bio_set *_bios;

/* FIXME: can we shrink this? */
struct io {
	unsigned long error;
	atomic_t count;
	struct task_struct *sleeper;
	io_notify_fn callback;
	void *context;
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io, we'll
 * have the same number of io contexts as buffer heads!  (FIXME:
 * must reduce this).
 */
static unsigned _num_ios;
static mempool_t *_io_pool;

static void *alloc_io(gfp_t gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct io), gfp_mask);
}

static void free_io(void *element, void *pool_data)
{
	kfree(element);
}

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many? */
}
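
/*
 * Worked example: a client calling dm_io_get(64) to reserve 64 pages
 * grows the pool by pages_to_ios(64) == 256 io structs.
 */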

static int resize_pool(unsigned int new_ios)
{
	int r = 0;

	if (_io_pool) {
		if (new_ios == 0) {
			/* free off the pool */
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			bioset_free(_bios);

		} else {
			/* resize the pool */
			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
		}

	} else {
		/* create new pool */
		_io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
		if (!_io_pool)
			return -ENOMEM;

		_bios = bioset_create(16, 16, 4);
		if (!_bios) {
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			return -ENOMEM;
		}
	}

	if (!r)
		_num_ios = new_ios;

	return r;
}

int dm_io_get(unsigned int num_pages)
{
	return resize_pool(_num_ios + pages_to_ios(num_pages));
}

void dm_io_put(unsigned int num_pages)
{
	resize_pool(_num_ios - pages_to_ios(num_pages));
}
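
/*
 * Usage sketch (illustrative, not part of this file): a hypothetical dm
 * target that does io on up to NR_PAGES pages at a time would bracket
 * its lifetime with the pair:
 *
 *	dm_io_get(NR_PAGES);	<-- in the ctr, may fail with -ENOMEM
 *	...
 *	dm_io_put(NR_PAGES);	<-- in the dtr
 *
 * NR_PAGES is a made-up name for this example.
 */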

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len;
}
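
/*
 * Round-trip illustration: do_region() below allocates every bio with a
 * spare bvec that bio_add_page() never fills, so its bv_len field can
 * carry the region number instead:
 *
 *	bio_set_region(bio, 3);
 *	...
 *	bio_get_region(bio);	<-- yields 3 again in endio()
 */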

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, _io_pool);
			fn(r, context);
		}
	}
}

static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io = (struct io *) bio->bi_private;

	/*
	 * Keep going until we've finished: a non-zero bi_size means
	 * this is only a partial completion.
	 */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	dec_count(io, bio_get_region(bio), error);
	bio_put(bio);

	return 0;
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
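
/*
 * Consumption pattern, as used by do_region() below: peek at the
 * current page with get_page(), use some prefix of it, and advance
 * with next_page() once the page has been consumed:
 *
 *	while (more io to issue) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		... add up to len bytes of page, starting at offset ...
 *		dp->next_page(dp);
 *	}
 */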

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a vmalloc'd buffer.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
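
/*
 * Worked example of the offset arithmetic: with PAGE_SIZE 4096 and a
 * buffer starting at, say, 0xe0801200, vm_dp_init() sets context_u to
 * 0x200, so the first vm_get_page() yields offset 0x200 and len 0xe00;
 * vm_next_page() then advances context_ptr to the page boundary at
 * 0xe0802000 and every later page is used in full.
 */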

static void dm_bio_destructor(struct bio *bio)
{
	bio_free(bio, _bios);
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio.  We add an extra
		 * bvec so bio_set_region()/bio_get_region() have a
		 * spare slot to store the region in.
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> 9)) + 2;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio_set_region(bio, region);

		/*
		 * Try to add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}
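
/*
 * Sizing example for do_region(): with 4K pages (PAGE_SIZE >> 9 == 8
 * sectors per page), a 17 sector region gives num_bvecs = 17/8 + 2 = 4,
 * i.e. up to three bvecs of data plus the spare slot reserved for the
 * region number.
 */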

static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(unsigned int num_regions, struct io_region *where,
	    int rw, struct dpages *dp, unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	*error_bits = io.error;
	return io.error ? -EIO : 0;
}

static int async_io(unsigned int num_regions, struct io_region *where, int rw,
	     struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(_io_pool, GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}
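
/*
 * Usage sketch (illustrative, not part of this file): synchronously
 * read the first 8 sectors of a device into a vmalloc'd buffer:
 *
 *	struct io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io_sync_vm(1, &where, READ, buffer, &error_bits);
 *
 * On error, bit i of error_bits is set if region i failed.
 */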

int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return async_io(num_regions, where, rw, &dp, fn, context);
}
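
/*
 * Usage sketch (illustrative, not part of this file): the async
 * variants return immediately and the callback runs from the bio
 * completion path, so it must not sleep:
 *
 *	static void my_notify(unsigned long error, void *context)
 *	{
 *		... error is the region error bitset, as above ...
 *	}
 *
 *	dm_io_async_vm(1, &where, WRITE, buffer, my_notify, context);
 */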

EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);