io_u.c revision 155015351132ad00e2e21cb63f52fee91e56e92a
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <signal.h>
5#include <time.h>
6#include <assert.h>
7
8#include "fio.h"
9#include "hash.h"
10#include "verify.h"
11#include "trim.h"
12#include "lib/rand.h"
13
14struct io_completion_data {
15	int nr;				/* input */
16
17	int error;			/* output */
18	unsigned long bytes_done[DDIR_RWDIR_CNT];	/* output */
19	struct timeval time;		/* output */
20};
21
22/*
23 * The ->file_map[] contains a map of blocks we have or have not done io
24 * to yet. Used to make sure we cover the entire range in a fair fashion.
25 */
26static int random_map_free(struct fio_file *f, const unsigned long long block)
27{
28	unsigned int idx = RAND_MAP_IDX(f, block);
29	unsigned int bit = RAND_MAP_BIT(f, block);
30
31	dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);
32
33	return (f->file_map[idx] & (1UL << bit)) == 0;
34}
35
36/*
37 * Mark a given offset as used in the map.
38 */
39static void mark_random_map(struct thread_data *td, struct io_u *io_u)
40{
41	unsigned int min_bs = td->o.rw_min_bs;
42	struct fio_file *f = io_u->file;
43	unsigned long long block;
44	unsigned int blocks, nr_blocks;
45	int busy_check;
46
47	block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
48	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
49	blocks = 0;
50	busy_check = !(io_u->flags & IO_U_F_BUSY_OK);
51
52	while (nr_blocks) {
53		unsigned int idx, bit;
54		unsigned long mask, this_blocks;
55
56		/*
57		 * If we have a mixed random workload, we may
58		 * encounter blocks we already did IO to.
59		 */
60		if (!busy_check) {
61			blocks = nr_blocks;
62			break;
63		}
64		if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block))
65			break;
66
67		idx = RAND_MAP_IDX(f, block);
68		bit = RAND_MAP_BIT(f, block);
69
70		fio_assert(td, idx < f->num_maps);
71
72		this_blocks = nr_blocks;
73		if (this_blocks + bit > BLOCKS_PER_MAP)
74			this_blocks = BLOCKS_PER_MAP - bit;
75
76		do {
77			if (this_blocks == BLOCKS_PER_MAP)
78				mask = -1UL;
79			else
80				mask = ((1UL << this_blocks) - 1) << bit;
81
82			if (!(f->file_map[idx] & mask))
83				break;
84
85			this_blocks--;
86		} while (this_blocks);
87
88		if (!this_blocks)
89			break;
90
91		f->file_map[idx] |= mask;
92		nr_blocks -= this_blocks;
93		blocks += this_blocks;
94		block += this_blocks;
95	}
96
97	if ((blocks * min_bs) < io_u->buflen)
98		io_u->buflen = blocks * min_bs;
99}
100
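/*
 * Return the number of blocks covered by the usable size of the file for
 * this data direction: the io size, capped at the real file size (or
 * replaced by the zone range when one is set), divided by ->ba[ddir].
 */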
101static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
102				     enum fio_ddir ddir)
103{
104	unsigned long long max_blocks;
105	unsigned long long max_size;
106
107	assert(ddir_rw(ddir));
108
109	/*
110	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
111	 */
112	max_size = f->io_size;
113	if (max_size > f->real_file_size)
114		max_size = f->real_file_size;
115
116	if (td->o.zone_range)
117		max_size = td->o.zone_range;
118
119	max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
120	if (!max_blocks)
121		return 0;
122
123	return max_blocks;
124}
125
126/*
127 * Return the next free block in the map.
128 */
129static int get_next_free_block(struct thread_data *td, struct fio_file *f,
130			       enum fio_ddir ddir, unsigned long long *b)
131{
132	unsigned long long block, min_bs = td->o.rw_min_bs, lastb;
133	int i;
134
135	lastb = last_block(td, f, ddir);
136	if (!lastb)
137		return 1;
138
139	i = f->last_free_lookup;
140	block = i * BLOCKS_PER_MAP;
141	while (block * min_bs < f->real_file_size &&
142		block * min_bs < f->io_size) {
143		if (f->file_map[i] != -1UL) {
144			block += ffz(f->file_map[i]);
145			if (block > lastb)
146				break;
147			f->last_free_lookup = i;
148			*b = block;
149			return 0;
150		}
151
152		block += BLOCKS_PER_MAP;
153		i++;
154	}
155
156	dprint(FD_IO, "failed finding a free block\n");
157	return 1;
158}
159
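/*
 * Generate a random block offset. If a random map is maintained, retry a
 * few times on busy blocks before falling back to scanning the map for a
 * free block.
 */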
160static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
161				enum fio_ddir ddir, unsigned long long *b)
162{
163	unsigned long long rmax, r, lastb;
164	int loops = 5;
165
166	lastb = last_block(td, f, ddir);
167	if (!lastb)
168		return 1;
169
170	if (f->failed_rands >= 200)
171		goto ffz;
172
173	rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
174	do {
175		if (td->o.use_os_rand)
176			r = os_random_long(&td->random_state);
177		else
178			r = __rand(&td->__random_state);
179
180		*b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));
181
182		dprint(FD_RANDOM, "off rand %llu\n", r);
183
184
185		/*
186		 * if we are not maintaining a random map, we are done.
187		 */
188		if (!file_randommap(td, f))
189			goto ret_good;
190
191		/*
192		 * calculate map offset and check if it's free
193		 */
194		if (random_map_free(f, *b))
195			goto ret_good;
196
197		dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
198									*b);
199	} while (--loops);
200
201	if (!f->failed_rands++)
202		f->last_free_lookup = 0;
203
204	/*
205	 * we get here if we didn't succeed in looking up a block. generate
206	 * a random start offset into the filemap, and find the first free
207	 * block from there.
208	 */
209	loops = 10;
210	do {
211		f->last_free_lookup = (f->num_maps - 1) *
212					(r / ((unsigned long long) rmax + 1.0));
213		if (!get_next_free_block(td, f, ddir, b))
214			goto ret;
215
216		if (td->o.use_os_rand)
217			r = os_random_long(&td->random_state);
218		else
219			r = __rand(&td->__random_state);
220	} while (--loops);
221
222	/*
223	 * that didn't work either, try exhaustive search from the start
224	 */
225	f->last_free_lookup = 0;
226ffz:
227	if (!get_next_free_block(td, f, ddir, b))
228		return 0;
229	f->last_free_lookup = 0;
230	return get_next_free_block(td, f, ddir, b);
231ret_good:
232	f->failed_rands = 0;
233ret:
234	return 0;
235}
236
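/*
 * Wrapper around get_next_rand_offset() that, for time_based jobs, resets
 * the file and retries once if we have run out of fresh blocks.
 */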
237static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
238			       enum fio_ddir ddir, unsigned long long *b)
239{
240	if (!get_next_rand_offset(td, f, ddir, b))
241		return 0;
242
243	if (td->o.time_based) {
244		fio_file_reset(f);
245		if (!get_next_rand_offset(td, f, ddir, b))
246			return 0;
247	}
248
249	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
250			f->file_name, f->last_pos, f->real_file_size);
251	return 1;
252}
253
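/*
 * Compute the next sequential offset, applying ddir_seq_add and wrapping
 * back for time_based jobs once the end of the io range is reached.
 */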
254static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
255			       enum fio_ddir ddir, unsigned long long *offset)
256{
257	assert(ddir_rw(ddir));
258
259	if (f->last_pos >= f->io_size + get_start_offset(td) && td->o.time_based)
260		f->last_pos = f->last_pos - f->io_size;
261
262	if (f->last_pos < f->real_file_size) {
263		unsigned long long pos;
264
265		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
266			f->last_pos = f->real_file_size;
267
268		pos = f->last_pos - f->file_offset;
269		if (pos)
270			pos += td->o.ddir_seq_add;
271
272		*offset = pos;
273		return 0;
274	}
275
276	return 1;
277}
278
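/*
 * Pick the next block or offset for this io_u, either randomly or
 * sequentially depending on the workload mix and the rw_seq setting.
 */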
279static int get_next_block(struct thread_data *td, struct io_u *io_u,
280			  enum fio_ddir ddir, int rw_seq)
281{
282	struct fio_file *f = io_u->file;
283	unsigned long long b, offset;
284	int ret;
285
286	assert(ddir_rw(ddir));
287
288	b = offset = -1ULL;
289
290	if (rw_seq) {
291		if (td_random(td))
292			ret = get_next_rand_block(td, f, ddir, &b);
293		else
294			ret = get_next_seq_offset(td, f, ddir, &offset);
295	} else {
296		io_u->flags |= IO_U_F_BUSY_OK;
297
298		if (td->o.rw_seq == RW_SEQ_SEQ) {
299			ret = get_next_seq_offset(td, f, ddir, &offset);
300			if (ret)
301				ret = get_next_rand_block(td, f, ddir, &b);
302		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
303			if (f->last_start != -1ULL)
304				offset = f->last_start - f->file_offset;
305			else
306				offset = 0;
307			ret = 0;
308		} else {
309			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
310			ret = 1;
311		}
312	}
313
314	if (!ret) {
315		if (offset != -1ULL)
316			io_u->offset = offset;
317		else if (b != -1ULL)
318			io_u->offset = b * td->o.ba[ddir];
319		else {
320			log_err("fio: bug in offset generation\n");
321			ret = 1;
322		}
323	}
324
325	return ret;
326}
327
328/*
329 * For random io, generate a random new block and see if it's used. Repeat
330 * until we find a free one. For sequential io, just return the end of
331 * the last io issued.
332 */
333static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
334{
335	struct fio_file *f = io_u->file;
336	enum fio_ddir ddir = io_u->ddir;
337	int rw_seq_hit = 0;
338
339	assert(ddir_rw(ddir));
340
341	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
342		rw_seq_hit = 1;
343		td->ddir_seq_nr = td->o.ddir_seq_nr;
344	}
345
346	if (get_next_block(td, io_u, ddir, rw_seq_hit))
347		return 1;
348
349	if (io_u->offset >= f->io_size) {
350		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
351					io_u->offset, f->io_size);
352		return 1;
353	}
354
355	io_u->offset += f->file_offset;
356	if (io_u->offset >= f->real_file_size) {
357		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
358					io_u->offset, f->real_file_size);
359		return 1;
360	}
361
362	return 0;
363}
364
365static int get_next_offset(struct thread_data *td, struct io_u *io_u)
366{
367	struct prof_io_ops *ops = &td->prof_io_ops;
368
369	if (ops->fill_io_u_off)
370		return ops->fill_io_u_off(td, io_u);
371
372	return __get_next_offset(td, io_u);
373}
374
375static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
376			    unsigned int buflen)
377{
378	struct fio_file *f = io_u->file;
379
380	return io_u->offset + buflen <= f->io_size + get_start_offset(td);
381}
382
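/*
 * Pick a buffer length between min_bs and max_bs, either uniformly at
 * random or from the bssplit distribution, rounded to a multiple of the
 * minimum block size unless bs_unaligned is set.
 */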
383static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
384{
385	const int ddir = io_u->ddir;
386	unsigned int uninitialized_var(buflen);
387	unsigned int minbs, maxbs;
388	unsigned long r, rand_max;
389
390	assert(ddir_rw(ddir));
391
392	minbs = td->o.min_bs[ddir];
393	maxbs = td->o.max_bs[ddir];
394
395	if (minbs == maxbs)
396		return minbs;
397
398	/*
399	 * If we can't satisfy the min block size from here, then fail
400	 */
401	if (!io_u_fits(td, io_u, minbs))
402		return 0;
403
404	if (td->o.use_os_rand)
405		rand_max = OS_RAND_MAX;
406	else
407		rand_max = FRAND_MAX;
408
409	do {
410		if (td->o.use_os_rand)
411			r = os_random_long(&td->bsrange_state);
412		else
413			r = __rand(&td->__bsrange_state);
414
415		if (!td->o.bssplit_nr[ddir]) {
416			buflen = 1 + (unsigned int) ((double) maxbs *
417					(r / (rand_max + 1.0)));
418			if (buflen < minbs)
419				buflen = minbs;
420		} else {
421			long perc = 0;
422			unsigned int i;
423
424			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
425				struct bssplit *bsp = &td->o.bssplit[ddir][i];
426
427				buflen = bsp->bs;
428				perc += bsp->perc;
429				if ((r <= ((rand_max / 100L) * perc)) &&
430				    io_u_fits(td, io_u, buflen))
431					break;
432			}
433		}
434
435		if (!td->o.bs_unaligned && is_power_of_2(minbs))
436			buflen = (buflen + minbs - 1) & ~(minbs - 1);
437
438	} while (!io_u_fits(td, io_u, buflen));
439
440	return buflen;
441}
442
443static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
444{
445	struct prof_io_ops *ops = &td->prof_io_ops;
446
447	if (ops->fill_io_u_size)
448		return ops->fill_io_u_size(td, io_u);
449
450	return __get_next_buflen(td, io_u);
451}
452
453static void set_rwmix_bytes(struct thread_data *td)
454{
455	unsigned int diff;
456
457	/*
458	 * we do time or byte based switch. this is needed because
459	 * buffered writes may issue a lot quicker than they complete,
460	 * whereas reads do not.
461	 */
462	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
463	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
464}
465
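/*
 * Roll a value in the 1..100 range and map it to a read or a write,
 * according to the rwmix percentages.
 */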
466static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
467{
468	unsigned int v;
469	unsigned long r;
470
471	if (td->o.use_os_rand) {
472		r = os_random_long(&td->rwmix_state);
473		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
474	} else {
475		r = __rand(&td->__rwmix_state);
476		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
477	}
478
479	if (v <= td->o.rwmix[DDIR_READ])
480		return DDIR_READ;
481
482	return DDIR_WRITE;
483}
484
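/*
 * If rate limiting has accumulated pending sleep for this direction,
 * either switch to the other direction or sleep off the debt before
 * issuing more io.
 */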
485static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
486{
487	enum fio_ddir odir = ddir ^ 1;
488	struct timeval t;
489	long usec;
490
491	assert(ddir_rw(ddir));
492
493	if (td->rate_pending_usleep[ddir] <= 0)
494		return ddir;
495
496	/*
497	 * We have too much pending sleep in this direction. See if we
498	 * should switch.
499	 */
500	if (td_rw(td)) {
501		/*
502		 * Other direction does not have too much pending, switch
503		 */
504		if (td->rate_pending_usleep[odir] < 100000)
505			return odir;
506
507		/*
508		 * Both directions have pending sleep. Sleep the minimum time
509		 * and deduct from both.
510		 */
511		if (td->rate_pending_usleep[ddir] <=
512			td->rate_pending_usleep[odir]) {
513			usec = td->rate_pending_usleep[ddir];
514		} else {
515			usec = td->rate_pending_usleep[odir];
516			ddir = odir;
517		}
518	} else
519		usec = td->rate_pending_usleep[ddir];
520
521	/*
522	 * We are going to sleep; ensure that we flush anything pending so
523	 * as not to skew our latency numbers.
524	 *
525	 * Changed to only monitor 'in flight' requests here instead of the
526	 * td->cur_depth, b/c td->cur_depth does not accurately represent
527	 * io's that have been actually submitted to an async engine,
528	 * and cur_depth is meaningless for sync engines.
529	 */
530	if (td->io_u_in_flight) {
531		int fio_unused ret;
532
533		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
534	}
535
536	fio_gettime(&t, NULL);
537	usec_sleep(td, usec);
538	usec = utime_since_now(&t);
539
540	td->rate_pending_usleep[ddir] -= usec;
541
542	odir = ddir ^ 1;
543	if (td_rw(td) && __should_check_rate(td, odir))
544		td->rate_pending_usleep[odir] -= usec;
545
546	if (ddir_trim(ddir))
547		return ddir;
548	return ddir;
549}
550
551/*
552 * Return the data direction for the next io_u. If the job is a
553 * mixed read/write workload, check the rwmix cycle and switch if
554 * necessary.
555 */
556static enum fio_ddir get_rw_ddir(struct thread_data *td)
557{
558	enum fio_ddir ddir;
559
560	/*
561	 * see if it's time to fsync
562	 */
563	if (td->o.fsync_blocks &&
564	   !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
565	     td->io_issues[DDIR_WRITE] && should_fsync(td))
566		return DDIR_SYNC;
567
568	/*
569	 * see if it's time to fdatasync
570	 */
571	if (td->o.fdatasync_blocks &&
572	   !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
573	     td->io_issues[DDIR_WRITE] && should_fsync(td))
574		return DDIR_DATASYNC;
575
576	/*
577	 * see if it's time to sync_file_range
578	 */
579	if (td->sync_file_range_nr &&
580	   !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
581	     td->io_issues[DDIR_WRITE] && should_fsync(td))
582		return DDIR_SYNC_FILE_RANGE;
583
584	if (td_rw(td)) {
585		/*
586		 * Check if it's time to seed a new data direction.
587		 */
588		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
589			/*
590			 * Put a top limit on how many bytes we do for
591			 * one data direction, to avoid overflowing the
592			 * ranges too much
593			 */
594			ddir = get_rand_ddir(td);
595
596			if (ddir != td->rwmix_ddir)
597				set_rwmix_bytes(td);
598
599			td->rwmix_ddir = ddir;
600		}
601		ddir = td->rwmix_ddir;
602	} else if (td_read(td))
603		ddir = DDIR_READ;
604	else if (td_write(td))
605		ddir = DDIR_WRITE;
606	else
607		ddir = DDIR_TRIM;
608
609	td->rwmix_ddir = rate_ddir(td, ddir);
610	return td->rwmix_ddir;
611}
612
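/*
 * Set the data direction for this io_u, flagging a write barrier when
 * barrier_blocks writes have been issued on a barrier-capable engine.
 */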
613static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
614{
615	io_u->ddir = get_rw_ddir(td);
616
617	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
618	    td->o.barrier_blocks &&
619	   !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
620	     td->io_issues[DDIR_WRITE])
621		io_u->flags |= IO_U_F_BARRIER;
622}
623
624void put_file_log(struct thread_data *td, struct fio_file *f)
625{
626	int ret = put_file(td, f);
627
628	if (ret)
629		td_verror(td, ret, "file close");
630}
631
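/*
 * Return an io_u to the free list, dropping the file reference unless the
 * free has been deferred.
 */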
632void put_io_u(struct thread_data *td, struct io_u *io_u)
633{
634	td_io_u_lock(td);
635
636	if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
637		put_file_log(td, io_u->file);
638	io_u->file = NULL;
639	io_u->flags &= ~IO_U_F_FREE_DEF;
640	io_u->flags |= IO_U_F_FREE;
641
642	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
643		td->cur_depth--;
644	flist_del_init(&io_u->list);
645	flist_add(&io_u->list, &td->io_u_freelist);
646	td_io_u_unlock(td);
647	td_io_u_free_notify(td);
648}
649
650void clear_io_u(struct thread_data *td, struct io_u *io_u)
651{
652	io_u->flags &= ~IO_U_F_FLIGHT;
653	put_io_u(td, io_u);
654}
655
656void requeue_io_u(struct thread_data *td, struct io_u **io_u)
657{
658	struct io_u *__io_u = *io_u;
659
660	dprint(FD_IO, "requeue %p\n", __io_u);
661
662	td_io_u_lock(td);
663
664	__io_u->flags |= IO_U_F_FREE;
665	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
666		td->io_issues[__io_u->ddir]--;
667
668	__io_u->flags &= ~IO_U_F_FLIGHT;
669	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
670		td->cur_depth--;
671	flist_del(&__io_u->list);
672	flist_add_tail(&__io_u->list, &td->io_u_requeues);
673	td_io_u_unlock(td);
674	*io_u = NULL;
675}
676
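/*
 * Fill in the data direction, offset and buffer length for an io_u,
 * handling zone skipping and marking the random map along the way.
 */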
677static int fill_io_u(struct thread_data *td, struct io_u *io_u)
678{
679	if (td->io_ops->flags & FIO_NOIO)
680		goto out;
681
682	set_rw_ddir(td, io_u);
683
684	/*
685	 * fsync() or fdatasync() or trim etc, we are done
686	 */
687	if (!ddir_rw(io_u->ddir))
688		goto out;
689
690	/*
691	 * See if it's time to switch to a new zone
692	 */
693	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
694		td->zone_bytes = 0;
695		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
696		io_u->file->last_pos = io_u->file->file_offset;
697		td->io_skip_bytes += td->o.zone_skip;
698	}
699
700	/*
701	 * No log, let the seq/rand engine retrieve the next buflen and
702	 * position.
703	 */
704	if (get_next_offset(td, io_u)) {
705		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
706		return 1;
707	}
708
709	io_u->buflen = get_next_buflen(td, io_u);
710	if (!io_u->buflen) {
711		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
712		return 1;
713	}
714
715	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
716		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
717		dprint(FD_IO, "  off=%llu/%lu > %llu\n", io_u->offset,
718				io_u->buflen, io_u->file->real_file_size);
719		return 1;
720	}
721
722	/*
723	 * mark entry before potentially trimming io_u
724	 */
725	if (td_random(td) && file_randommap(td, io_u->file))
726		mark_random_map(td, io_u);
727
728	/*
729	 * If using a write iolog, store this entry.
730	 */
731out:
732	dprint_io_u(io_u, "fill_io_u");
733	td->zone_bytes += io_u->buflen;
734	log_io_u(td, io_u);
735	return 0;
736}
737
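/*
 * Bump the histogram bucket matching a batch of nr submitted or completed
 * io_us.
 */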
738static void __io_u_mark_map(unsigned int *map, unsigned int nr)
739{
740	int idx = 0;
741
742	switch (nr) {
743	default:
744		idx = 6;
745		break;
746	case 33 ... 64:
747		idx = 5;
748		break;
749	case 17 ... 32:
750		idx = 4;
751		break;
752	case 9 ... 16:
753		idx = 3;
754		break;
755	case 5 ... 8:
756		idx = 2;
757		break;
758	case 1 ... 4:
759		idx = 1;
760	case 0:
761		break;
762	}
763
764	map[idx]++;
765}
766
767void io_u_mark_submit(struct thread_data *td, unsigned int nr)
768{
769	__io_u_mark_map(td->ts.io_u_submit, nr);
770	td->ts.total_submit++;
771}
772
773void io_u_mark_complete(struct thread_data *td, unsigned int nr)
774{
775	__io_u_mark_map(td->ts.io_u_complete, nr);
776	td->ts.total_complete++;
777}
778
779void io_u_mark_depth(struct thread_data *td, unsigned int nr)
780{
781	int idx = 0;
782
783	switch (td->cur_depth) {
784	default:
785		idx = 6;
786		break;
787	case 32 ... 63:
788		idx = 5;
789		break;
790	case 16 ... 31:
791		idx = 4;
792		break;
793	case 8 ... 15:
794		idx = 3;
795		break;
796	case 4 ... 7:
797		idx = 2;
798		break;
799	case 2 ... 3:
800		idx = 1;
801	case 1:
802		break;
803	}
804
805	td->ts.io_u_map[idx] += nr;
806}
807
808static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
809{
810	int idx = 0;
811
812	assert(usec < 1000);
813
814	switch (usec) {
815	case 750 ... 999:
816		idx = 9;
817		break;
818	case 500 ... 749:
819		idx = 8;
820		break;
821	case 250 ... 499:
822		idx = 7;
823		break;
824	case 100 ... 249:
825		idx = 6;
826		break;
827	case 50 ... 99:
828		idx = 5;
829		break;
830	case 20 ... 49:
831		idx = 4;
832		break;
833	case 10 ... 19:
834		idx = 3;
835		break;
836	case 4 ... 9:
837		idx = 2;
838		break;
839	case 2 ... 3:
840		idx = 1;
841	case 0 ... 1:
842		break;
843	}
844
845	assert(idx < FIO_IO_U_LAT_U_NR);
846	td->ts.io_u_lat_u[idx]++;
847}
848
849static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
850{
851	int idx = 0;
852
853	switch (msec) {
854	default:
855		idx = 11;
856		break;
857	case 1000 ... 1999:
858		idx = 10;
859		break;
860	case 750 ... 999:
861		idx = 9;
862		break;
863	case 500 ... 749:
864		idx = 8;
865		break;
866	case 250 ... 499:
867		idx = 7;
868		break;
869	case 100 ... 249:
870		idx = 6;
871		break;
872	case 50 ... 99:
873		idx = 5;
874		break;
875	case 20 ... 49:
876		idx = 4;
877		break;
878	case 10 ... 19:
879		idx = 3;
880		break;
881	case 4 ... 9:
882		idx = 2;
883		break;
884	case 2 ... 3:
885		idx = 1;
886	case 0 ... 1:
887		break;
888	}
889
890	assert(idx < FIO_IO_U_LAT_M_NR);
891	td->ts.io_u_lat_m[idx]++;
892}
893
894static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
895{
896	if (usec < 1000)
897		io_u_mark_lat_usec(td, usec);
898	else
899		io_u_mark_lat_msec(td, usec / 1000);
900}
901
902/*
903 * Get next file to service by choosing one at random
904 */
905static struct fio_file *get_next_file_rand(struct thread_data *td,
906					   enum fio_file_flags goodf,
907					   enum fio_file_flags badf)
908{
909	struct fio_file *f;
910	int fno;
911
912	do {
913		int opened = 0;
914		unsigned long r;
915
916		if (td->o.use_os_rand) {
917			r = os_random_long(&td->next_file_state);
918			fno = (unsigned int) ((double) td->o.nr_files
919				* (r / (OS_RAND_MAX + 1.0)));
920		} else {
921			r = __rand(&td->__next_file_state);
922			fno = (unsigned int) ((double) td->o.nr_files
923				* (r / (FRAND_MAX + 1.0)));
924		}
925
926		f = td->files[fno];
927		if (fio_file_done(f))
928			continue;
929
930		if (!fio_file_open(f)) {
931			int err;
932
933			err = td_io_open_file(td, f);
934			if (err)
935				continue;
936			opened = 1;
937		}
938
939		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
940			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
941			return f;
942		}
943		if (opened)
944			td_io_close_file(td, f);
945	} while (1);
946}
947
948/*
949 * Get next file to service by doing round robin between all available ones
950 */
951static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
952					 int badf)
953{
954	unsigned int old_next_file = td->next_file;
955	struct fio_file *f;
956
957	do {
958		int opened = 0;
959
960		f = td->files[td->next_file];
961
962		td->next_file++;
963		if (td->next_file >= td->o.nr_files)
964			td->next_file = 0;
965
966		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
967		if (fio_file_done(f)) {
968			f = NULL;
969			continue;
970		}
971
972		if (!fio_file_open(f)) {
973			int err;
974
975			err = td_io_open_file(td, f);
976			if (err) {
977				dprint(FD_FILE, "error %d on open of %s\n",
978					err, f->file_name);
979				f = NULL;
980				continue;
981			}
982			opened = 1;
983		}
984
985		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
986								f->flags);
987		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
988			break;
989
990		if (opened)
991			td_io_close_file(td, f);
992
993		f = NULL;
994	} while (td->next_file != old_next_file);
995
996	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
997	return f;
998}
999
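/*
 * Select the next file to service, honoring the file_service_type policy
 * and skipping files that are already done.
 */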
1000static struct fio_file *__get_next_file(struct thread_data *td)
1001{
1002	struct fio_file *f;
1003
1004	assert(td->o.nr_files <= td->files_index);
1005
1006	if (td->nr_done_files >= td->o.nr_files) {
1007		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
1008				" nr_files=%d\n", td->nr_open_files,
1009						  td->nr_done_files,
1010						  td->o.nr_files);
1011		return NULL;
1012	}
1013
1014	f = td->file_service_file;
1015	if (f && fio_file_open(f) && !fio_file_closing(f)) {
1016		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
1017			goto out;
1018		if (td->file_service_left--)
1019			goto out;
1020	}
1021
1022	if (td->o.file_service_type == FIO_FSERVICE_RR ||
1023	    td->o.file_service_type == FIO_FSERVICE_SEQ)
1024		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
1025	else
1026		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);
1027
1028	td->file_service_file = f;
1029	td->file_service_left = td->file_service_nr - 1;
1030out:
1031	dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
1032	return f;
1033}
1034
1035static struct fio_file *get_next_file(struct thread_data *td)
1036{
1037	struct prof_io_ops *ops = &td->prof_io_ops;
1038
1039	if (ops->get_next_file)
1040		return ops->get_next_file(td);
1041
1042	return __get_next_file(td);
1043}
1044
1045static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
1046{
1047	struct fio_file *f;
1048
1049	do {
1050		f = get_next_file(td);
1051		if (!f)
1052			return 1;
1053
1054		io_u->file = f;
1055		get_file(f);
1056
1057		if (!fill_io_u(td, io_u))
1058			break;
1059
1060		put_file_log(td, f);
1061		td_io_close_file(td, f);
1062		io_u->file = NULL;
1063		fio_file_set_done(f);
1064		td->nr_done_files++;
1065		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
1066					td->nr_done_files, td->o.nr_files);
1067	} while (1);
1068
1069	return 0;
1070}
1071
1072
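/*
 * Grab an io_u from the requeue list if one is pending, otherwise from
 * the free list. May block waiting for async verify threads to hand one
 * back.
 */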
1073struct io_u *__get_io_u(struct thread_data *td)
1074{
1075	struct io_u *io_u = NULL;
1076
1077	td_io_u_lock(td);
1078
1079again:
1080	if (!flist_empty(&td->io_u_requeues))
1081		io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
1082	else if (!queue_full(td)) {
1083		io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);
1084
1085		io_u->buflen = 0;
1086		io_u->resid = 0;
1087		io_u->file = NULL;
1088		io_u->end_io = NULL;
1089	}
1090
1091	if (io_u) {
1092		assert(io_u->flags & IO_U_F_FREE);
1093		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
1094		io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
1095		io_u->flags &= ~IO_U_F_VER_LIST;
1096
1097		io_u->error = 0;
1098		flist_del(&io_u->list);
1099		flist_add_tail(&io_u->list, &td->io_u_busylist);
1100		td->cur_depth++;
1101		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
1102	} else if (td->o.verify_async) {
1103		/*
1104		 * We ran out, wait for async verify threads to finish and
1105		 * return one
1106		 */
1107		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
1108		goto again;
1109	}
1110
1111	td_io_u_unlock(td);
1112	return io_u;
1113}
1114
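/*
 * See if it is time to service a queued trim instead of generating new io,
 * based on the trim_backlog and trim_batch settings.
 */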
1115static int check_get_trim(struct thread_data *td, struct io_u *io_u)
1116{
1117	if (td->o.trim_backlog && td->trim_entries) {
1118		int get_trim = 0;
1119
1120		if (td->trim_batch) {
1121			td->trim_batch--;
1122			get_trim = 1;
1123		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
1124			 td->last_ddir != DDIR_READ) {
1125			td->trim_batch = td->o.trim_batch;
1126			if (!td->trim_batch)
1127				td->trim_batch = td->o.trim_backlog;
1128			get_trim = 1;
1129		}
1130
1131		if (get_trim && !get_next_trim(td, io_u))
1132			return 1;
1133	}
1134
1135	return 0;
1136}
1137
1138static int check_get_verify(struct thread_data *td, struct io_u *io_u)
1139{
1140	if (td->o.verify_backlog && td->io_hist_len) {
1141		int get_verify = 0;
1142
1143		if (td->verify_batch)
1144			get_verify = 1;
1145		else if (!(td->io_hist_len % td->o.verify_backlog) &&
1146			 td->last_ddir != DDIR_READ) {
1147			td->verify_batch = td->o.verify_batch;
1148			if (!td->verify_batch)
1149				td->verify_batch = td->o.verify_backlog;
1150			get_verify = 1;
1151		}
1152
1153		if (get_verify && !get_next_verify(td, io_u)) {
1154			td->verify_batch--;
1155			return 1;
1156		}
1157	}
1158
1159	return 0;
1160}
1161
1162/*
1163 * Fill offset and start time into the buffer content, so the data is
1164 * not trivially compressible or easily de-duped. Do this for every
1165 * 512b block in the range, since that should be the smallest block size
1166 * we can expect from a device.
1167 */
1168static void small_content_scramble(struct io_u *io_u)
1169{
1170	unsigned int i, nr_blocks = io_u->buflen / 512;
1171	unsigned long long boffset;
1172	unsigned int offset;
1173	void *p, *end;
1174
1175	if (!nr_blocks)
1176		return;
1177
1178	p = io_u->xfer_buf;
1179	boffset = io_u->offset;
1180	io_u->buf_filled_len = 0;
1181
1182	for (i = 0; i < nr_blocks; i++) {
1183		/*
1184		 * Fill the byte offset into a "random" start offset of
1185		 * the buffer, given by the usec time xor'ed with the
1186		 * actual offset.
1187		 */
1188		offset = (io_u->start_time.tv_usec ^ boffset) & 511;
1189		offset &= ~(sizeof(unsigned long long) - 1);
1190		if (offset >= 512 - sizeof(unsigned long long))
1191			offset -= sizeof(unsigned long long);
1192		memcpy(p + offset, &boffset, sizeof(boffset));
1193
1194		end = p + 512 - sizeof(io_u->start_time);
1195		memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
1196		p += 512;
1197		boffset += 512;
1198	}
1199}
1200
1201/*
1202 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
1203 * etc. The returned io_u is fully ready to be prepped and submitted.
1204 */
1205struct io_u *get_io_u(struct thread_data *td)
1206{
1207	struct fio_file *f;
1208	struct io_u *io_u;
1209	int do_scramble = 0;
1210
1211	io_u = __get_io_u(td);
1212	if (!io_u) {
1213		dprint(FD_IO, "__get_io_u failed\n");
1214		return NULL;
1215	}
1216
1217	if (check_get_verify(td, io_u))
1218		goto out;
1219	if (check_get_trim(td, io_u))
1220		goto out;
1221
1222	/*
1223	 * from a requeue, io_u already setup
1224	 */
1225	if (io_u->file)
1226		goto out;
1227
1228	/*
1229	 * If using an iolog, grab next piece if any available.
1230	 */
1231	if (td->o.read_iolog_file) {
1232		if (read_iolog_get(td, io_u))
1233			goto err_put;
1234	} else if (set_io_u_file(td, io_u)) {
1235		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
1236		goto err_put;
1237	}
1238
1239	f = io_u->file;
1240	assert(fio_file_open(f));
1241
1242	if (ddir_rw(io_u->ddir)) {
1243		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
1244			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
1245			goto err_put;
1246		}
1247
1248		f->last_start = io_u->offset;
1249		f->last_pos = io_u->offset + io_u->buflen;
1250
1251		if (io_u->ddir == DDIR_WRITE) {
1252			if (td->o.refill_buffers) {
1253				io_u_fill_buffer(td, io_u,
1254					io_u->xfer_buflen, io_u->xfer_buflen);
1255			} else if (td->o.scramble_buffers)
1256				do_scramble = 1;
1257			if (td->o.verify != VERIFY_NONE) {
1258				populate_verify_io_u(td, io_u);
1259				do_scramble = 0;
1260			}
1261		} else if (io_u->ddir == DDIR_READ) {
1262			/*
1263			 * Reset buf_filled_len so that the buffer is refilled
1264			 * the next time it is used for a write.
1265			 */
1266			io_u->buf_filled_len = 0;
1267		}
1268	}
1269
1270	/*
1271	 * Set io data pointers.
1272	 */
1273	io_u->xfer_buf = io_u->buf;
1274	io_u->xfer_buflen = io_u->buflen;
1275
1276out:
1277	assert(io_u->file);
1278	if (!td_io_prep(td, io_u)) {
1279		if (!td->o.disable_slat)
1280			fio_gettime(&io_u->start_time, NULL);
1281		if (do_scramble)
1282			small_content_scramble(io_u);
1283		return io_u;
1284	}
1285err_put:
1286	dprint(FD_IO, "get_io_u failed\n");
1287	put_io_u(td, io_u);
1288	return NULL;
1289}
1290
1291void io_u_log_error(struct thread_data *td, struct io_u *io_u)
1292{
1293	enum error_type_bit eb = td_error_type(io_u->ddir, io_u->error);
1294	const char *msg[] = { "read", "write", "sync", "datasync",
1295				"sync_file_range", "wait", "trim" };
1296
1297	if (td_non_fatal_error(td, eb, io_u->error) && !td->o.error_dump)
1298		return;
1299
1300	log_err("fio: io_u error");
1301
1302	if (io_u->file)
1303		log_err(" on file %s", io_u->file->file_name);
1304
1305	log_err(": %s\n", strerror(io_u->error));
1306
1307	log_err("     %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
1308					io_u->offset, io_u->xfer_buflen);
1309
1310	if (!td->error)
1311		td_verror(td, io_u->error, "io_u error");
1312}
1313
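/*
 * Record latency, bandwidth and iops samples for a completed io_u, and
 * check the total latency against any configured max_latency.
 */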
1314static void account_io_completion(struct thread_data *td, struct io_u *io_u,
1315				  struct io_completion_data *icd,
1316				  const enum fio_ddir idx, unsigned int bytes)
1317{
1318	unsigned long uninitialized_var(lusec);
1319
1320	if (!td->o.disable_clat || !td->o.disable_bw)
1321		lusec = utime_since(&io_u->issue_time, &icd->time);
1322
1323	if (!td->o.disable_lat) {
1324		unsigned long tusec;
1325
1326		tusec = utime_since(&io_u->start_time, &icd->time);
1327		add_lat_sample(td, idx, tusec, bytes);
1328
1329		if (td->o.max_latency && tusec > td->o.max_latency) {
1330			if (!td->error)
1331				log_err("fio: latency of %lu usec exceeds specified max (%u usec)\n", tusec, td->o.max_latency);
1332			td_verror(td, ETIME, "max latency exceeded");
1333			icd->error = ETIME;
1334		}
1335	}
1336
1337	if (!td->o.disable_clat) {
1338		add_clat_sample(td, idx, lusec, bytes);
1339		io_u_mark_latency(td, lusec);
1340	}
1341
1342	if (!td->o.disable_bw)
1343		add_bw_sample(td, idx, bytes, &icd->time);
1344
1345	add_iops_sample(td, idx, &icd->time);
1346}
1347
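/*
 * How long, in microseconds, the io done so far in this direction should
 * have taken at the configured rate.
 */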
1348static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
1349{
1350	unsigned long long secs, remainder, bps, bytes;
1351	bytes = td->this_io_bytes[ddir];
1352	bps = td->rate_bps[ddir];
1353	secs = bytes / bps;
1354	remainder = bytes % bps;
1355	return remainder * 1000000 / bps + secs * 1000000;
1356}
1357
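/*
 * Per-io completion handling: account blocks and bytes, update rate
 * bookkeeping, and log the piece for later verification where needed.
 */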
1358static void io_completed(struct thread_data *td, struct io_u *io_u,
1359			 struct io_completion_data *icd)
1360{
1361	/*
1362	 * Older gcc's are too dumb to realize that usec is always
1363	 * initialized before use; silence that warning.
1364	 */
1365	unsigned long uninitialized_var(usec);
1366	struct fio_file *f;
1367
1368	dprint_io_u(io_u, "io complete");
1369
1370	td_io_u_lock(td);
1371	assert(io_u->flags & IO_U_F_FLIGHT);
1372	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
1373	td_io_u_unlock(td);
1374
1375	if (ddir_sync(io_u->ddir)) {
1376		td->last_was_sync = 1;
1377		f = io_u->file;
1378		if (f) {
1379			f->first_write = -1ULL;
1380			f->last_write = -1ULL;
1381		}
1382		return;
1383	}
1384
1385	td->last_was_sync = 0;
1386	td->last_ddir = io_u->ddir;
1387
1388	if (!io_u->error && ddir_rw(io_u->ddir)) {
1389		unsigned int bytes = io_u->buflen - io_u->resid;
1390		const enum fio_ddir idx = io_u->ddir;
1391		const enum fio_ddir odx = io_u->ddir ^ 1;
1392		int ret;
1393
1394		td->io_blocks[idx]++;
1395		td->this_io_blocks[idx]++;
1396		td->io_bytes[idx] += bytes;
1397
1398		if (!(io_u->flags & IO_U_F_VER_LIST))
1399			td->this_io_bytes[idx] += bytes;
1400
1401		if (idx == DDIR_WRITE) {
1402			f = io_u->file;
1403			if (f) {
1404				if (f->first_write == -1ULL ||
1405				    io_u->offset < f->first_write)
1406					f->first_write = io_u->offset;
1407				if (f->last_write == -1ULL ||
1408				    ((io_u->offset + bytes) > f->last_write))
1409					f->last_write = io_u->offset + bytes;
1410			}
1411		}
1412
1413		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
1414					   td->runstate == TD_VERIFYING)) {
1415			account_io_completion(td, io_u, icd, idx, bytes);
1416
1417			if (__should_check_rate(td, idx)) {
1418				td->rate_pending_usleep[idx] =
1419					(usec_for_io(td, idx) -
1420					 utime_since_now(&td->start));
1421			}
1422			if (idx != DDIR_TRIM && __should_check_rate(td, odx))
1423				td->rate_pending_usleep[odx] =
1424					(usec_for_io(td, odx) -
1425					 utime_since_now(&td->start));
1426		}
1427
1428		if (td_write(td) && idx == DDIR_WRITE &&
1429		    td->o.do_verify &&
1430		    td->o.verify != VERIFY_NONE)
1431			log_io_piece(td, io_u);
1432
1433		icd->bytes_done[idx] += bytes;
1434
1435		if (io_u->end_io) {
1436			ret = io_u->end_io(td, io_u);
1437			if (ret && !icd->error)
1438				icd->error = ret;
1439		}
1440	} else if (io_u->error) {
1441		icd->error = io_u->error;
1442		io_u_log_error(td, io_u);
1443	}
1444	if (icd->error) {
1445		enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);
1446		if (!td_non_fatal_error(td, eb, icd->error))
1447			return;
1448		/*
1449		 * If there is a non_fatal error, then add to the error count
1450		 * and clear all the errors.
1451		 */
1452		update_error_count(td, icd->error);
1453		td_clear_error(td);
1454		icd->error = 0;
1455		io_u->error = 0;
1456	}
1457}
1458
1459static void init_icd(struct thread_data *td, struct io_completion_data *icd,
1460		     int nr)
1461{
1462	int ddir;
1463	if (!td->o.disable_clat || !td->o.disable_bw)
1464		fio_gettime(&icd->time, NULL);
1465
1466	icd->nr = nr;
1467
1468	icd->error = 0;
1469	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1470		icd->bytes_done[ddir] = 0;
1471}
1472
1473static void ios_completed(struct thread_data *td,
1474			  struct io_completion_data *icd)
1475{
1476	struct io_u *io_u;
1477	int i;
1478
1479	for (i = 0; i < icd->nr; i++) {
1480		io_u = td->io_ops->event(td, i);
1481
1482		io_completed(td, io_u, icd);
1483
1484		if (!(io_u->flags & IO_U_F_FREE_DEF))
1485			put_io_u(td, io_u);
1486	}
1487}
1488
1489/*
1490 * Complete a single io_u for the sync engines.
1491 */
1492int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
1493		       unsigned long *bytes)
1494{
1495	struct io_completion_data icd;
1496
1497	init_icd(td, &icd, 1);
1498	io_completed(td, io_u, &icd);
1499
1500	if (!(io_u->flags & IO_U_F_FREE_DEF))
1501		put_io_u(td, io_u);
1502
1503	if (icd.error) {
1504		td_verror(td, icd.error, "io_u_sync_complete");
1505		return -1;
1506	}
1507
1508	if (bytes) {
1509		int ddir;
1510
1511		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1512			bytes[ddir] += icd.bytes_done[ddir];
1513	}
1514
1515	return 0;
1516}
1517
1518/*
1519 * Called to complete min_events number of io for the async engines.
1520 */
1521int io_u_queued_complete(struct thread_data *td, int min_evts,
1522			 unsigned long *bytes)
1523{
1524	struct io_completion_data icd;
1525	struct timespec *tvp = NULL;
1526	int ret;
1527	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };
1528
1529	dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);
1530
1531	if (!min_evts)
1532		tvp = &ts;
1533
1534	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
1535	if (ret < 0) {
1536		td_verror(td, -ret, "td_io_getevents");
1537		return ret;
1538	} else if (!ret)
1539		return ret;
1540
1541	init_icd(td, &icd, ret);
1542	ios_completed(td, &icd);
1543	if (icd.error) {
1544		td_verror(td, icd.error, "io_u_queued_complete");
1545		return -1;
1546	}
1547
1548	if (bytes) {
1549		int ddir;
1550
1551		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
1552			bytes[ddir] += icd.bytes_done[ddir];
1553	}
1554
1555	return 0;
1556}
1557
1558/*
1559 * Call when io_u is really queued, to update the submission latency.
1560 */
1561void io_u_queued(struct thread_data *td, struct io_u *io_u)
1562{
1563	if (!td->o.disable_slat) {
1564		unsigned long slat_time;
1565
1566		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
1567		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
1568	}
1569}
1570
1571/*
1572 * "randomly" fill the buffer contents
1573 */
1574void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
1575		      unsigned int min_write, unsigned int max_bs)
1576{
1577	io_u->buf_filled_len = 0;
1578
1579	if (!td->o.zero_buffers) {
1580		unsigned int perc = td->o.compress_percentage;
1581
1582		if (perc) {
1583			unsigned int seg = min_write;
1584
1585			seg = min(min_write, td->o.compress_chunk);
1586			fill_random_buf_percentage(&td->buf_state, io_u->buf,
1587						perc, seg, max_bs);
1588		} else
1589			fill_random_buf(&td->buf_state, io_u->buf, max_bs);
1590	} else
1591		memset(io_u->buf, 0, max_bs);
1592}
1593