io_u.c revision 2ab9e98b300c35d3b7807f74d404ce3c5de33fb3
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"
#include "lib/bitmap.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	unsigned long bytes_done[DDIR_RWDIR_CNT];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->io_bitmap contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const unsigned long long block)
{
	return !bitmap_isset(f->io_bitmap, block);
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned long long block;
	unsigned int nr_blocks;

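	/*
	 * Example (illustrative numbers, not from the source): with
	 * min_bs = 4096, io_u->offset = 12288 and f->file_offset = 0,
	 * block = 12288 / 4096 = 3; a buflen of 10000 bytes covers
	 * nr_blocks = (10000 + 4095) / 4096 = 3 map blocks.
	 */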
	block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

	if (!(io_u->flags & IO_U_F_BUSY_OK))
		nr_blocks = bitmap_set_nr(f->io_bitmap, block, nr_blocks);

	if ((nr_blocks * min_bs) < io_u->buflen)
		io_u->buflen = nr_blocks * min_bs;
}

static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
				     enum fio_ddir ddir)
{
	unsigned long long max_blocks;
	unsigned long long max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, unsigned long long *b)
{
	unsigned long long rmax, r, lastb;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (td->o.use_os_rand) {
		rmax = OS_RAND_MAX;
		r = os_random_long(&td->random_state);
	} else {
		rmax = FRAND_MAX;
		r = __rand(&td->__random_state);
	}
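	/*
	 * Scale the random value into a block index: r / (rmax + 1.0) is
	 * uniform in [0, 1), so the product below is uniform over
	 * [0, lastb - 1) before truncation.
	 */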
	*b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));

	dprint(FD_RANDOM, "off rand %llu\n", r);

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", *b);

	*b = bitmap_next_free(f->io_bitmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}

static int __get_next_rand_offset_zipf(struct thread_data *td,
				       struct fio_file *f, enum fio_ddir ddir,
				       unsigned long long *b)
{
	*b = zipf_next(&f->zipf);
	return 0;
}

static int __get_next_rand_offset_pareto(struct thread_data *td,
					 struct fio_file *f, enum fio_ddir ddir,
					 unsigned long long *b)
{
	*b = pareto_next(&f->zipf);
	return 0;
}

static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, unsigned long long *b)
{
	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return __get_next_rand_offset(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		return __get_next_rand_offset_zipf(td, f, ddir, b);
	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
		return __get_next_rand_offset_pareto(td, f, ddir, b);

	log_err("fio: unknown random distribution: %d\n", td->o.random_distribution);
	return 1;
}
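/*
 * The distribution is selected with the random_distribution job option,
 * e.g. (illustrative values):
 *
 *	random_distribution=zipf:1.2
 *	random_distribution=pareto:0.9
 *
 * with plain "random" being the default uniform behavior.
 */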

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, unsigned long long *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, f->last_pos, f->real_file_size);
	return 1;
}

static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, unsigned long long *offset)
{
	assert(ddir_rw(ddir));

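	/*
	 * For time_based runs, wrap back by io_size once we run past the
	 * end of the range, so sequential io keeps looping over the file.
	 */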
	if (f->last_pos >= f->io_size + get_start_offset(td) && td->o.time_based)
		f->last_pos = f->last_pos - f->io_size;

	if (f->last_pos < f->real_file_size) {
		unsigned long long pos;

		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
			f->last_pos = f->real_file_size;

		pos = f->last_pos - f->file_offset;
		if (pos)
			pos += td->o.ddir_seq_add;

		*offset = pos;
		return 0;
	}

	return 1;
}

static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq)
{
	struct fio_file *f = io_u->file;
	unsigned long long b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td))
			ret = get_next_rand_block(td, f, ddir, &b);
		else
			ret = get_next_seq_offset(td, f, ddir, &offset);
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret)
				ret = get_next_rand_block(td, f, ddir, &b);
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start != -1ULL)
				offset = f->last_start - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation: offset=%llu, b=%llu\n",
								offset, b);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					io_u->offset, f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					io_u->offset, f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
	struct prof_io_ops *ops = &td->prof_io_ops;

	if (ops->fill_io_u_off)
		return ops->fill_io_u_off(td, io_u);

	return __get_next_offset(td, io_u);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td);
}

static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
	const int ddir = io_u->ddir;
	unsigned int buflen = 0;
	unsigned int minbs, maxbs;
	unsigned long r, rand_max;

	assert(ddir_rw(ddir));

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	if (td->o.use_os_rand)
		rand_max = OS_RAND_MAX;
	else
		rand_max = FRAND_MAX;

	do {
		if (td->o.use_os_rand)
			r = os_random_long(&td->bsrange_state);
		else
			r = __rand(&td->__bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (rand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

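			/*
			 * Walk the bssplit table until the cumulative
			 * percentage covers r. E.g. (illustrative) with
			 * bssplit=4k/60:16k/40, r below 60% of rand_max
			 * picks 4k, the rest picks 16k.
			 */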
			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((rand_max / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

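		/*
		 * Round buflen up to a multiple of minbs: adding minbs - 1
		 * and masking with ~(minbs - 1) only works when minbs is a
		 * power of 2, hence the is_power_of_2() check.
		 */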
		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
	struct prof_io_ops *ops = &td->prof_io_ops;

	if (ops->fill_io_u_size)
		return ops->fill_io_u_size(td, io_u);

	return __get_next_buflen(td, io_u);
}

static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
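	/*
	 * E.g. (illustrative): if the current direction is reads and
	 * rwmix is 80/20, diff is 20 and the next switch point becomes
	 * 20% of the number of reads issued so far.
	 */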
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->rwmix_state);
		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__rwmix_state);
		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
	}

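	/*
	 * v is uniformly distributed in [1, 100]. E.g. (illustrative):
	 * with rwmix[DDIR_READ] = 60, reads are chosen 60% of the time.
	 */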
	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td)) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
	}

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	   !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	     td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else if (td_write(td))
		ddir = DDIR_WRITE;
	else
		ddir = DDIR_TRIM;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	io_u->ddir = get_rw_ddir(td);

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	   !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	     td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
		put_file_log(td, io_u->file);
	io_u->file = NULL;
	io_u->flags &= ~IO_U_F_FREE_DEF;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	flist_del_init(&io_u->list);
	flist_add(&io_u->list, &td->io_u_freelist);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
		td->io_issues[__io_u->ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	flist_del(&__io_u->list);
	flist_add_tail(&__io_u->list, &td->io_u_requeues);
	td_io_u_unlock(td);
	*io_u = NULL;
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
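	/*
	 * Example (illustrative): with zone_size=64m and zone_skip=192m,
	 * after every 64m of io the file offset is bumped by
	 * zone_range + zone_skip, leaving a hole between zones.
	 */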
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		td->zone_bytes = 0;
		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
		io_u->file->last_pos = io_u->file->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, "  off=%llu/%lu > %llu\n", io_u->offset,
				io_u->buflen, io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

	/*
	 * If using a write iolog, store this entry.
	 */
out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	log_io_u(td, io_u);
	return 0;
}

static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		if (td->o.use_os_rand) {
			r = os_random_long(&td->next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (OS_RAND_MAX + 1.0)));
		} else {
			r = __rand(&td->__next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));
		}

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
								f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
						  td->nr_done_files,
						  td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
946	dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
947	return f;
	return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
	struct prof_io_ops *ops = &td->prof_io_ops;

	if (ops->get_next_file)
		return ops->get_next_file(td);

	return __get_next_file(td);
}

static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (!f)
			return 1;

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
					td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	td_io_u_lock(td);

again:
	if (!flist_empty(&td->io_u_requeues))
		io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
	else if (!queue_full(td)) {
		io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);

		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->file = NULL;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
		io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
		io_u->flags &= ~IO_U_F_VER_LIST;

		io_u->error = 0;
		flist_del(&io_u->list);
		flist_add_tail(&io_u->list, &td->io_u_busylist);
		td->cur_depth++;
		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
	} else if (td->o.verify_async) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
		goto again;
	}

	td_io_u_unlock(td);
	return io_u;
}

static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.trim_backlog && td->trim_entries) {
		int get_trim = 0;

		if (td->trim_batch) {
			td->trim_batch--;
			get_trim = 1;
		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->trim_batch = td->o.trim_batch;
			if (!td->trim_batch)
				td->trim_batch = td->o.trim_backlog;
			get_trim = 1;
		}

		if (get_trim && !get_next_trim(td, io_u))
			return 1;
	}

	return 0;
}

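/*
 * With verify_backlog=N set (N illustrative), verification of written
 * blocks is interleaved with the workload: every N entries in the io
 * history, a batch of verifies is queued instead of new writes.
 */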
static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.verify_backlog && td->io_hist_len) {
		int get_verify = 0;

		if (td->verify_batch)
			get_verify = 1;
		else if (!(td->io_hist_len % td->o.verify_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->verify_batch = td->o.verify_batch;
			if (!td->verify_batch)
				td->verify_batch = td->o.verify_backlog;
			get_verify = 1;
		}

		if (get_verify && !get_next_verify(td, io_u)) {
			td->verify_batch--;
			return 1;
		}
	}

	return 0;
}

/*
 * Fill offset and start time into the buffer content, to prevent the
 * data from being too easily compressible and defeating simple de-dupe
 * attempts. Do this for every 512b block in the range, since that should
 * be the smallest block size we can expect from a device.
 */
static void small_content_scramble(struct io_u *io_u)
{
	unsigned int i, nr_blocks = io_u->buflen / 512;
	unsigned long long boffset;
	unsigned int offset;
	void *p, *end;

	if (!nr_blocks)
		return;

	p = io_u->xfer_buf;
	boffset = io_u->offset;
	io_u->buf_filled_len = 0;

	for (i = 0; i < nr_blocks; i++) {
		/*
		 * Fill the byte offset into a "random" start offset of
		 * the buffer, given by xor'ing the usec time with the
		 * block offset.
		 */
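		/*
		 * Example (illustrative): tv_usec = 123456 and
		 * boffset = 4096 give (123456 ^ 4096) & 511 = 64,
		 * which is already aligned to 8 bytes here.
		 */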
		offset = (io_u->start_time.tv_usec ^ boffset) & 511;
		offset &= ~(sizeof(unsigned long long) - 1);
		if (offset >= 512 - sizeof(unsigned long long))
			offset -= sizeof(unsigned long long);
		memcpy(p + offset, &boffset, sizeof(boffset));

		end = p + 512 - sizeof(io_u->start_time);
		memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
		p += 512;
		boffset += 512;
	}
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->o.read_iolog_file) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start = io_u->offset;
		f->last_pos = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->o.refill_buffers) {
				io_u_fill_buffer(td, io_u,
					io_u->xfer_buflen, io_u->xfer_buflen);
			} else if (td->o.scramble_buffers)
				do_scramble = 1;
			if (td->o.verify != VERIFY_NONE) {
				populate_verify_io_u(td, io_u);
				do_scramble = 0;
			}
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_slat)
			fio_gettime(&io_u->start_time, NULL);
		if (do_scramble)
			small_content_scramble(io_u);
		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return NULL;
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
	const char *msg[] = { "read", "write", "sync", "datasync",
				"sync_file_range", "wait", "trim" };

	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
		return;

	log_err("fio: io_u error");

	if (io_u->file)
		log_err(" on file %s", io_u->file->file_name);

	log_err(": %s\n", strerror(io_u->error));

	log_err("     %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
					io_u->offset, io_u->xfer_buflen);

	if (!td->error)
		td_verror(td, io_u->error, "io_u error");
}

static void account_io_completion(struct thread_data *td, struct io_u *io_u,
				  struct io_completion_data *icd,
				  const enum fio_ddir idx, unsigned int bytes)
{
	unsigned long lusec = 0;

	if (!td->o.disable_clat || !td->o.disable_bw)
		lusec = utime_since(&io_u->issue_time, &icd->time);

	if (!td->o.disable_lat) {
		unsigned long tusec;

		tusec = utime_since(&io_u->start_time, &icd->time);
		add_lat_sample(td, idx, tusec, bytes);

		if (td->o.max_latency && tusec > td->o.max_latency) {
			if (!td->error)
				log_err("fio: latency of %lu usec exceeds specified max (%u usec)\n", tusec, td->o.max_latency);
			td_verror(td, ETIMEDOUT, "max latency exceeded");
			icd->error = ETIMEDOUT;
		}
	}

	if (!td->o.disable_clat) {
		add_clat_sample(td, idx, lusec, bytes);
		io_u_mark_latency(td, lusec);
	}

	if (!td->o.disable_bw)
		add_bw_sample(td, idx, bytes, &icd->time);

	add_iops_sample(td, idx, &icd->time);
}

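/*
 * How long the io done so far in this direction should have taken at the
 * requested rate. Example (illustrative): 10 MB done at a rate_bps of
 * 1 MB/s gives 10 full seconds, i.e. 10000000 usec. Assumes rate_bps is
 * non-zero, which holds when rate checking is enabled.
 */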
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned long long secs, remainder, bps, bytes;

	bytes = td->this_io_bytes[ddir];
	bps = td->rate_bps[ddir];
	secs = bytes / bps;
	remainder = bytes % bps;
	return remainder * 1000000 / bps + secs * 1000000;
}

static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	struct fio_file *f;

	dprint_io_u(io_u, "io complete");

	td_io_u_lock(td);
	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
	td_io_u_unlock(td);

	if (ddir_sync(io_u->ddir)) {
		td->last_was_sync = 1;
		f = io_u->file;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		return;
	}

	td->last_was_sync = 0;
	td->last_ddir = io_u->ddir;

	if (!io_u->error && ddir_rw(io_u->ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir idx = io_u->ddir;
		const enum fio_ddir odx = io_u->ddir ^ 1;
		int ret;

		td->io_blocks[idx]++;
		td->this_io_blocks[idx]++;
		td->io_bytes[idx] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST))
			td->this_io_bytes[idx] += bytes;

		if (idx == DDIR_WRITE) {
			f = io_u->file;
			if (f) {
				if (f->first_write == -1ULL ||
				    io_u->offset < f->first_write)
					f->first_write = io_u->offset;
				if (f->last_write == -1ULL ||
				    ((io_u->offset + bytes) > f->last_write))
					f->last_write = io_u->offset + bytes;
			}
		}

		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
					   td->runstate == TD_VERIFYING)) {
			account_io_completion(td, io_u, icd, idx, bytes);

			if (__should_check_rate(td, idx)) {
				td->rate_pending_usleep[idx] =
					(usec_for_io(td, idx) -
					 utime_since_now(&td->start));
			}
			if (idx != DDIR_TRIM && __should_check_rate(td, odx))
				td->rate_pending_usleep[odx] =
					(usec_for_io(td, odx) -
					 utime_since_now(&td->start));
		}

		if (td_write(td) && idx == DDIR_WRITE &&
		    td->o.do_verify &&
		    td->o.verify != VERIFY_NONE)
			log_io_piece(td, io_u);

		icd->bytes_done[idx] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u);
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error) {
		enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);

		if (!td_non_fatal_error(td, eb, icd->error))
			return;
		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		io_u->error = 0;
	}
}

static void init_icd(struct thread_data *td, struct io_completion_data *icd,
		     int nr)
{
	int ddir;

	if (!td->o.disable_clat || !td->o.disable_bw)
		fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
		icd->bytes_done[ddir] = 0;
}

static void ios_completed(struct thread_data *td,
			  struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, io_u, icd);

		if (!(io_u->flags & IO_U_F_FREE_DEF))
			put_io_u(td, io_u);
	}
}

/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
		       unsigned long *bytes)
{
	struct io_completion_data icd;

	init_icd(td, &icd, 1);
	io_completed(td, io_u, &icd);

	if (!(io_u->flags & IO_U_F_FREE_DEF))
		put_io_u(td, io_u);

	if (icd.error) {
		td_verror(td, icd.error, "io_u_sync_complete");
		return -1;
	}

	if (bytes) {
		int ddir;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];
	}

	return 0;
}

/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts,
			 unsigned long *bytes)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);

	if (!min_evts)
		tvp = &ts;

	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	if (bytes) {
		int ddir;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];
	}

	return 0;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat) {
		unsigned long slat_time;

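		/*
		 * slat is the time from io_u creation (start_time) to
		 * hand-off to the kernel (issue_time); completion latency
		 * is measured from issue_time, so the total latency is
		 * effectively slat + clat.
		 */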
		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
	}
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned int min_write, unsigned int max_bs)
{
	io_u->buf_filled_len = 0;

	if (!td->o.zero_buffers) {
		unsigned int perc = td->o.compress_percentage;

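		/*
		 * A non-zero compress percentage (the
		 * buffer_compress_percentage job option) asks for buffers
		 * that are roughly that compressible, filled in
		 * compress_chunk sized segments.
		 */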
		if (perc) {
			unsigned int seg = min(min_write, td->o.compress_chunk);

			fill_random_buf_percentage(&td->buf_state, io_u->buf,
						perc, seg, max_bs);
		} else
			fill_random_buf(&td->buf_state, io_u->buf, max_bs);
	} else
		memset(io_u->buf, 0, max_bs);
}