/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
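
/*
 * Worked example of the packing above (illustrative values only): on a
 * 64-bit machine DM_IO_MAX_REGIONS is BITS_PER_LONG == 64, so an aligned
 * 'struct io' pointer has its six low bits clear.  Combining a pointer
 * whose value ends in ...d380 with region 5 stores ...d385 in bi_private;
 * masking with -64 (i.e. ~63UL) recovers the pointer and masking with 63
 * recovers the region number.
 */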

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
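
/*
 * All of the dpages flavours below follow the same contract: get_page()
 * reports the current page, the offset into it and the number of bytes
 * usable from that offset; next_page() advances the iterator to the start
 * of the following page.  context_ptr and context_u hold whatever
 * per-flavour cursor state is needed.
 */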

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}
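
/*
 * For the bio flavour, context_ptr points at the bvec the bio's iterator
 * currently sits in and context_u mirrors bi_iter.bi_bvec_done, i.e. how
 * many bytes of that bvec have already been consumed.
 */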

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
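
/*
 * Example of the initialisation above (illustrative address only): with a
 * 4 KiB page size, vm_dp_init() called with data == 0xffffc90000001200
 * records an initial in-page offset of 0x200, so the first get_page()
 * returns that page with offset 0x200 and PAGE_SIZE - 0x200 usable bytes;
 * every subsequent page starts at offset 0.
 */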

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
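/*
 * do_region() issues as many bios as are needed to cover one dm_io_region:
 * discard and write-same requests are sized by the queue limits and carry
 * at most a single page, while ordinary reads and writes pack pages from
 * the dpages iterator into each bio until bio_add_page() refuses, then
 * loop until 'remaining' reaches zero.
 */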
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably-sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try to add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
 * If you fail to do one of these, the IO will be submitted to the disk after
 * q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
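
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * synchronously read eight sectors from the start of 'bdev' into a
 * page-aligned kernel buffer.  'client' comes from dm_io_client_create()
 * and a NULL notify.fn selects the sync_io() path above.
 *
 *	struct dm_io_region where = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw		= READ,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buffer,
 *		.notify.fn	= NULL,
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 */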

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}