splice.c revision a5f3027cb0495dfe217b2626d248fcc054e7e878
/*
 * splice engine
 *
 * IO engine that transfers data by splicing to/from pipes and
 * the files.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/poll.h>
#include <sys/mman.h>

#include "../fio.h"

#ifdef FIO_HAVE_SPLICE

struct spliceio_data {
	int pipe[2];			/* transfer pipe: [0] read end, [1] write end */
	int vmsplice_to_user;		/* kernel supports vmsplice() to user space */
	int vmsplice_to_user_map;	/* vmsplice into a private anonymous mapping */
};

/*
 * vmsplice did not originally support splicing to user space; this is the
 * old variant of getting that job done. It doesn't buy much over a plain
 * read(), but it uses splice to move data from the source file into a
 * pipe and then reads it back out of the pipe.
 */
static int fio_splice_read_old(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	struct fio_file *f = io_u->file;
	int ret, ret2, buflen;
	off_t offset;
	void *p;

	offset = io_u->offset;
	buflen = io_u->xfer_buflen;
	p = io_u->xfer_buf;
	while (buflen) {
		int this_len = buflen;

		if (this_len > SPLICE_DEF_SIZE)
			this_len = SPLICE_DEF_SIZE;

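		/*
		 * Move this chunk from the file into the write side of
		 * the pipe; it is read back out into the buffer below.
		 */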
		ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE);
		if (ret < 0) {
			if (errno == ENODATA || errno == EAGAIN)
				continue;

			return -errno;
		}

		buflen -= ret;

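		/*
		 * Drain what was just spliced into the pipe back out with
		 * plain read(), since vmsplice to user space isn't used here.
		 */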
		while (ret) {
			ret2 = read(sd->pipe[0], p, ret);
			if (ret2 < 0)
				return -errno;

			ret -= ret2;
			p += ret2;
		}
	}

	return io_u->xfer_buflen;
}

/*
 * We can now vmsplice into userspace, so do the transfer by splicing into
 * a pipe and vmsplicing that into userspace.
 */
static int fio_splice_read(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	struct fio_file *f = io_u->file;
	struct iovec iov;
	int ret, buflen, mmap_len;
	off_t offset;
	void *p, *map;

	ret = 0;
	offset = io_u->offset;
	mmap_len = buflen = io_u->xfer_buflen;

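	/*
	 * If the "real" vmsplice to user variant is available, splice into
	 * a private anonymous mapping so pages can be mapped in directly
	 * rather than copied into the transfer buffer.
	 */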
	if (sd->vmsplice_to_user_map) {
		map = mmap(io_u->xfer_buf, buflen, PROT_READ, MAP_PRIVATE|OS_MAP_ANON, 0, 0);
		if (map == MAP_FAILED) {
			td_verror(td, errno, "mmap io_u");
			return -1;
		}

		p = map;
	} else {
		map = NULL;
		p = io_u->xfer_buf;
	}

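	/*
	 * Splice from the file into the pipe, then vmsplice each chunk
	 * from the pipe into the destination buffer.
	 */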
	while (buflen) {
		int this_len = buflen;
		int flags = 0;

		if (this_len > SPLICE_DEF_SIZE) {
			this_len = SPLICE_DEF_SIZE;
			flags = SPLICE_F_MORE;
		}

		ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, flags);
		if (ret < 0) {
			if (errno == ENODATA || errno == EAGAIN)
				continue;

			td_verror(td, errno, "splice-from-fd");
			break;
		}

		buflen -= ret;
		iov.iov_base = p;
		iov.iov_len = ret;

		while (iov.iov_len) {
			ret = vmsplice(sd->pipe[0], &iov, 1, SPLICE_F_MOVE);
			if (ret < 0) {
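				/*
				 * vmsplice into the mapping faulted; give
				 * up on the mapped variant, fall back to
				 * the regular transfer buffer, and retry
				 * this chunk.
				 */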
				if (errno == EFAULT &&
				    sd->vmsplice_to_user_map) {
					sd->vmsplice_to_user_map = 0;
					munmap(map, mmap_len);
					map = NULL;
					p = io_u->xfer_buf;
					iov.iov_base = p;
					continue;
				}
				if (errno == EBADF) {
					ret = -EBADF;
					break;
				}
				td_verror(td, errno, "vmsplice");
				break;
			} else if (!ret) {
				td_verror(td, ENODATA, "vmsplice");
				ret = -1;
				break;
			}

			iov.iov_len -= ret;
			iov.iov_base += ret;
			p += ret;
		}
		if (ret < 0)
			break;
	}

	if (sd->vmsplice_to_user_map && munmap(map, mmap_len) < 0) {
		td_verror(td, errno, "munmap io_u");
		return -1;
	}
	if (ret < 0)
		return ret;

	return io_u->xfer_buflen;
}

/*
 * For splice writing, we can vmsplice our data buffer directly into a
 * pipe and then splice that to a file.
 */
static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	struct iovec iov = {
		.iov_base = io_u->xfer_buf,
		.iov_len = io_u->xfer_buflen,
	};
	struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
	struct fio_file *f = io_u->file;
	off_t off = io_u->offset;
	int ret, ret2;

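	/*
	 * vmsplice the data buffer into the pipe, then splice the pipe
	 * contents out to the file at the current offset.
	 */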
	while (iov.iov_len) {
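		/*
		 * Wait for room in the pipe, since the vmsplice below is
		 * issued non-blocking.
		 */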
		if (poll(&pfd, 1, -1) < 0)
			return -errno;

		ret = vmsplice(sd->pipe[1], &iov, 1, SPLICE_F_NONBLOCK);
		if (ret < 0)
			return -errno;

		iov.iov_len -= ret;
		iov.iov_base += ret;

		while (ret) {
			ret2 = splice(sd->pipe[0], NULL, f->fd, &off, ret, 0);
			if (ret2 < 0)
				return -errno;

			ret -= ret2;
		}
	}

	return io_u->xfer_buflen;
}

static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	int uninitialized_var(ret);

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ) {
		if (sd->vmsplice_to_user) {
			ret = fio_splice_read(td, io_u);
			/*
			 * This kernel doesn't support vmsplice to user
			 * space. Reset the vmsplice_to_user flag, so that
			 * we retry below and don't hit this path again.
			 */
			if (ret == -EBADF)
				sd->vmsplice_to_user = 0;
		}
		if (!sd->vmsplice_to_user)
			ret = fio_splice_read_old(td, io_u);
	} else if (io_u->ddir == DDIR_WRITE)
		ret = fio_splice_write(td, io_u);
	else if (io_u->ddir == DDIR_TRIM)
		ret = do_io_u_trim(td, io_u);
	else
		ret = do_io_u_sync(td, io_u);

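	/*
	 * A short transfer is accounted as a residual rather than an
	 * error; a negative return means the transfer failed and errno
	 * still describes the failure.
	 */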
	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		if (io_u->error == EINVAL)
			log_err("fio: looks like splice doesn't work on this"
					" file system\n");
	}

	return FIO_Q_COMPLETED;
}

static void fio_spliceio_cleanup(struct thread_data *td)
{
	struct spliceio_data *sd = td->io_ops->data;

	if (sd) {
		close(sd->pipe[0]);
		close(sd->pipe[1]);
		free(sd);
	}
}

static int fio_spliceio_init(struct thread_data *td)
{
	struct spliceio_data *sd = malloc(sizeof(*sd));

	if (pipe(sd->pipe) < 0) {
		td_verror(td, errno, "pipe");
		free(sd);
		return 1;
	}

	/*
	 * Assume this works; we'll reset it if it doesn't.
	 */
	sd->vmsplice_to_user = 1;

	/*
	 * Works with "real" vmsplice to user, e.g. mapping pages directly.
	 * Reset if we fail.
	 */
	sd->vmsplice_to_user_map = 1;

	/*
	 * And if vmsplice_to_user works, we definitely need aligned
	 * buffers. Just set ->odirect to force that.
	 */
	if (td_read(td))
		td->o.odirect = 1;

	td->io_ops->data = sd;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "splice",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_spliceio_init,
	.queue		= fio_spliceio_queue,
	.cleanup	= fio_spliceio_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO | FIO_PIPEIO,
};

#else /* FIO_HAVE_SPLICE */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now, install a crippled version that
 * just complains and fails to load.
 */
static int fio_spliceio_init(struct thread_data fio_unused *td)
{
	fprintf(stderr, "fio: splice not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "splice",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_spliceio_init,
};

#endif

static void fio_init fio_spliceio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_spliceio_unregister(void)
{
	unregister_ioengine(&ioengine);
}