/*
 * sg engine
 *
 * IO engine that uses the Linux SG v3 interface to talk to SCSI devices
 *
 */
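
/*
 * Illustrative use: a minimal fio job description driving this engine.
 * This is just a sketch; /dev/sg0 is a placeholder for whatever SG
 * character device (or block device) is present on the system.
 *
 *	[sg-seq-read]
 *	ioengine=sg
 *	filename=/dev/sg0
 *	rw=read
 *	bs=4k
 */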
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/poll.h>

#include "../fio.h"

#ifdef FIO_HAVE_SGIO

struct sgio_cmd {
	unsigned char cdb[10];		/* 10-byte CDB built per io_u */
	int nr;
};

struct sgio_data {
	struct sgio_cmd *cmds;		/* one per queued io_u */
	struct io_u **events;		/* completed io_u's for ->event() */
	struct pollfd *pfds;
	int *fd_flags;
	void *sgbuf;			/* sg_io_hdr reap buffer */
	unsigned int bs;		/* device logical block size */
	int type_checked;
};

static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
			  struct io_u *io_u, int fs)
{
	struct sgio_cmd *sc = &sd->cmds[io_u->index];

	memset(hdr, 0, sizeof(*hdr));
	memset(sc->cdb, 0, sizeof(sc->cdb));

	hdr->interface_id = 'S';
	hdr->cmdp = sc->cdb;
	hdr->cmd_len = sizeof(sc->cdb);
	hdr->pack_id = io_u->index;
	hdr->usr_ptr = io_u;

	if (fs) {
		hdr->dxferp = io_u->xfer_buf;
		hdr->dxfer_len = io_u->xfer_buflen;
	}
}

static int pollin_events(struct pollfd *pfds, int fds)
{
	int i;

	for (i = 0; i < fds; i++)
		if (pfds[i].revents & POLLIN)
			return 1;

	return 0;
}

static int fio_sgio_getevents(struct thread_data *td, unsigned int min,
			      unsigned int max,
			      const struct timespec fio_unused *t)
{
	struct sgio_data *sd = td->io_ops->data;
	int left = max, ret, r = 0;
	void *buf = sd->sgbuf;
	unsigned int i, events;
	struct fio_file *f;

	/*
	 * Fill in the file descriptors
	 */
	for_each_file(td, f, i) {
		/*
		 * don't block for min events == 0
		 */
		if (!min)
			sd->fd_flags[i] = fio_set_fd_nonblocking(f->fd, "sg");
		else
			sd->fd_flags[i] = -1;

		sd->pfds[i].fd = f->fd;
		sd->pfds[i].events = POLLIN;
	}

	while (left) {
		void *p;

		do {
			if (!min)
				break;

			ret = poll(sd->pfds, td->o.nr_files, -1);
			if (ret < 0) {
				if (!r)
					r = -errno;
				td_verror(td, errno, "poll");
				break;
			} else if (!ret)
				continue;

			if (pollin_events(sd->pfds, td->o.nr_files))
				break;
		} while (1);

		if (r < 0)
			break;

re_read:
		/*
		 * Reap completions: reading from an sg fd returns the
		 * sg_io_hdr of each finished command.
		 */
		p = buf;
		events = 0;
		for_each_file(td, f, i) {
			ret = read(f->fd, p, left * sizeof(struct sg_io_hdr));
			if (ret < 0) {
				if (errno == EAGAIN)
					continue;
				r = -errno;
				td_verror(td, errno, "read");
				break;
			} else if (ret) {
				p += ret;
				events += ret / sizeof(struct sg_io_hdr);
			}
		}

		if (r < 0)
			break;
		if (!events) {
			usleep(1000);
			goto re_read;
		}

		left -= events;
		r += events;

		for (i = 0; i < events; i++) {
			struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;

			sd->events[i] = hdr->usr_ptr;
		}
	}

	if (!min) {
		for_each_file(td, f, i) {
			if (sd->fd_flags[i] == -1)
				continue;

			if (fcntl(f->fd, F_SETFL, sd->fd_flags[i]) < 0)
				log_err("fio: sg failed to restore fcntl flags: %s\n", strerror(errno));
		}
	}

	return r;
}

static int fio_sgio_ioctl_doio(struct thread_data *td,
			       struct fio_file *f, struct io_u *io_u)
{
	struct sgio_data *sd = td->io_ops->data;
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	sd->events[0] = io_u;

	ret = ioctl(f->fd, SG_IO, hdr);
	if (ret < 0)
		return ret;

	return FIO_Q_COMPLETED;
}

static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int do_sync)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret;

	/*
	 * Writing the header to an sg character device submits the command.
	 */
	ret = write(f->fd, hdr, sizeof(*hdr));
	if (ret < 0)
		return ret;

	if (do_sync) {
		/*
		 * For sync IO, read the header straight back to reap it.
		 */
		ret = read(f->fd, hdr, sizeof(*hdr));
		if (ret < 0)
			return ret;
		return FIO_Q_COMPLETED;
	}

	return FIO_Q_QUEUED;
}

static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int do_sync)
{
	struct fio_file *f = io_u->file;

	/*
	 * Block devices go through the SG_IO ioctl, character devices
	 * (/dev/sg*) through the read/write interface.
	 */
	if (f->filetype == FIO_TYPE_BD)
		return fio_sgio_ioctl_doio(td, f, io_u);

	return fio_sgio_rw_doio(f, io_u, do_sync);
}

static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	struct sgio_data *sd = td->io_ops->data;
	int nr_blocks, lba;

	if (io_u->xfer_buflen & (sd->bs - 1)) {
		log_err("read/write not sector aligned\n");
		return EINVAL;
	}

	if (io_u->ddir == DDIR_READ) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_FROM_DEV;
		hdr->cmdp[0] = 0x28;		/* READ(10) */
	} else if (io_u->ddir == DDIR_WRITE) {
		sgio_hdr_init(sd, hdr, io_u, 1);

		hdr->dxfer_direction = SG_DXFER_TO_DEV;
		hdr->cmdp[0] = 0x2a;		/* WRITE(10) */
	} else {
		sgio_hdr_init(sd, hdr, io_u, 0);

		hdr->dxfer_direction = SG_DXFER_NONE;
		hdr->cmdp[0] = 0x35;		/* SYNCHRONIZE CACHE(10) */
	}

	if (hdr->dxfer_direction != SG_DXFER_NONE) {
		/*
		 * Fill in the big-endian LBA and transfer length fields
		 * of the 10-byte CDB.
		 */
		nr_blocks = io_u->xfer_buflen / sd->bs;
		lba = io_u->offset / sd->bs;
		hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
		hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
		hdr->cmdp[4] = (unsigned char) ((lba >>  8) & 0xff);
		hdr->cmdp[5] = (unsigned char) (lba & 0xff);
		hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
		hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
	}

	return 0;
}
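
/*
 * For reference, the 10-byte CDB that fio_sgio_prep() builds follows the
 * standard READ(10)/WRITE(10) layout; bytes 1, 6 and 9 are left zeroed
 * by this engine:
 *
 *	byte 0		opcode (0x28 read, 0x2a write)
 *	byte 1		flags (DPO/FUA etc), unused here
 *	bytes 2-5	logical block address, big-endian
 *	byte 6		group number, unused here
 *	bytes 7-8	transfer length in blocks, big-endian
 *	byte 9		control, unused here
 */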

static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct sg_io_hdr *hdr = &io_u->hdr;
	int ret, do_sync = 0;

	fio_ro_check(td, io_u);

	if (td->o.sync_io || td->o.odirect || ddir_sync(io_u->ddir))
		do_sync = 1;

	ret = fio_sgio_doio(td, io_u, do_sync);

	if (ret < 0)
		io_u->error = errno;
	else if (hdr->status) {
		io_u->resid = hdr->resid;
		io_u->error = EIO;
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		return FIO_Q_COMPLETED;
	}

	return ret;
}

static struct io_u *fio_sgio_event(struct thread_data *td, int event)
{
	struct sgio_data *sd = td->io_ops->data;

	return sd->events[event];
}

static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
{
	struct sgio_data *sd = td->io_ops->data;
	struct io_u io_u;
	struct sg_io_hdr *hdr;
	unsigned char buf[8];
	int ret;

	memset(&io_u, 0, sizeof(io_u));
	io_u.file = td->files[0];

	hdr = &io_u.hdr;
	sgio_hdr_init(sd, hdr, &io_u, 0);
	memset(buf, 0, sizeof(buf));

	hdr->cmdp[0] = 0x25;		/* READ CAPACITY(10) */
	hdr->dxfer_direction = SG_DXFER_FROM_DEV;
	hdr->dxferp = buf;
	hdr->dxfer_len = sizeof(buf);

	ret = fio_sgio_doio(td, &io_u, 1);
	if (ret)
		return ret;

	/*
	 * Bytes 4-7 of the READ CAPACITY(10) response hold the logical
	 * block length in bytes, big-endian.
	 */
	*bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
	return 0;
}

static void fio_sgio_cleanup(struct thread_data *td)
{
	struct sgio_data *sd = td->io_ops->data;

	if (sd) {
		free(sd->events);
		free(sd->cmds);
		free(sd->fd_flags);
		free(sd->pfds);
		free(sd->sgbuf);
		free(sd);
	}
}

static int fio_sgio_init(struct thread_data *td)
{
	struct sgio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->cmds = malloc(td->o.iodepth * sizeof(struct sgio_cmd));
	memset(sd->cmds, 0, td->o.iodepth * sizeof(struct sgio_cmd));
	sd->events = malloc(td->o.iodepth * sizeof(struct io_u *));
	memset(sd->events, 0, td->o.iodepth * sizeof(struct io_u *));
	sd->pfds = malloc(sizeof(struct pollfd) * td->o.nr_files);
	memset(sd->pfds, 0, sizeof(struct pollfd) * td->o.nr_files);
	sd->fd_flags = malloc(sizeof(int) * td->o.nr_files);
	memset(sd->fd_flags, 0, sizeof(int) * td->o.nr_files);
	sd->sgbuf = malloc(sizeof(struct sg_io_hdr) * td->o.iodepth);
	memset(sd->sgbuf, 0, sizeof(struct sg_io_hdr) * td->o.iodepth);

	td->io_ops->data = sd;

	/*
	 * we want to issue sync commands, regardless of whether odirect
	 * is set or not
	 */
	td->o.override_sync = 1;
	return 0;
}

static int fio_sgio_type_check(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops->data;
	unsigned int bs;

	if (f->filetype == FIO_TYPE_BD) {
		if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}
	} else if (f->filetype == FIO_TYPE_CHAR) {
		int version, ret;

		if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
			td_verror(td, errno, "ioctl");
			return 1;
		}

		ret = fio_sgio_get_bs(td, &bs);
		if (ret)
			return 1;
	} else {
		log_err("ioengine sg only works on block or character devices\n");
		return 1;
	}

	sd->bs = bs;

	/*
	 * Mark the check done so fio_sgio_open() doesn't repeat it for
	 * every opened file.
	 */
	sd->type_checked = 1;

	if (f->filetype == FIO_TYPE_BD) {
		td->io_ops->getevents = NULL;
		td->io_ops->event = NULL;
	}

	return 0;
}

static int fio_sgio_open(struct thread_data *td, struct fio_file *f)
{
	struct sgio_data *sd = td->io_ops->data;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return ret;

	if (sd && !sd->type_checked && fio_sgio_type_check(td, f)) {
		ret = generic_close_file(td, f);
		return 1;
	}

	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
	.prep		= fio_sgio_prep,
	.queue		= fio_sgio_queue,
	.getevents	= fio_sgio_getevents,
	.event		= fio_sgio_event,
	.cleanup	= fio_sgio_cleanup,
	.open_file	= fio_sgio_open,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO | FIO_RAWIO,
};

#else /* FIO_HAVE_SGIO */

/*
 * When we have a proper configure system in place, we simply won't build
 * and install this io engine. For now install a crippled version that
 * just complains and fails to load.
 */
static int fio_sgio_init(struct thread_data fio_unused *td)
{
	log_err("fio: ioengine sg not available\n");
	return 1;
}

static struct ioengine_ops ioengine = {
	.name		= "sg",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_sgio_init,
};

#endif

static void fio_init fio_sgio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_sgio_unregister(void)
{
	unregister_ioengine(&ioengine);
}