#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#include <dirent.h>
#include <libgen.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "fio.h"
#include "smalloc.h"
#include "filehash.h"
#include "options.h"
#include "os/os.h"
#include "hash.h"
#include "lib/axmap.h"

#ifdef CONFIG_LINUX_FALLOCATE
#include <linux/falloc.h>
#endif

static int root_warn;

static FLIST_HEAD(filename_list);

static inline void clear_error(struct thread_data *td)
{
	td->error = 0;
	td->verror[0] = '\0';
}

/*
 * Leaves f->fd open on success, caller must close
 */
static int extend_file(struct thread_data *td, struct fio_file *f)
{
	int r, new_layout = 0, unlink_file = 0, flags;
	unsigned long long left;
	unsigned int bs;
	char *b = NULL;

	if (read_only) {
		log_err("fio: refusing extend of file due to read-only\n");
		return 0;
	}

	/*
	 * check if we need to lay the file out complete again. fio
	 * does that for operations involving reads, or for writes
	 * where overwrite is set
	 */
	if (td_read(td) ||
	   (td_write(td) && td->o.overwrite && !td->o.file_append) ||
	    (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
		new_layout = 1;
	if (td_write(td) && !td->o.overwrite && !td->o.file_append)
		unlink_file = 1;

	if (unlink_file || new_layout) {
		dprint(FD_FILE, "layout unlink %s\n", f->file_name);
		if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
			td_verror(td, errno, "unlink");
			return 1;
		}
	}

	flags = O_WRONLY | O_CREAT;
	if (new_layout)
		flags |= O_TRUNC;

#ifdef WIN32
	flags |= _O_BINARY;
#endif

	dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
	f->fd = open(f->file_name, flags, 0644);
	if (f->fd < 0) {
		td_verror(td, errno, "open");
		return 1;
	}

#ifdef CONFIG_POSIX_FALLOCATE
	if (!td->o.fill_device) {
		switch (td->o.fallocate_mode) {
		case FIO_FALLOCATE_NONE:
			break;
		case FIO_FALLOCATE_POSIX:
			dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
				 f->file_name,
				 (unsigned long long) f->real_file_size);

			r = posix_fallocate(f->fd, 0, f->real_file_size);
			if (r > 0) {
				log_err("fio: posix_fallocate fails: %s\n",
						strerror(r));
			}
			break;
#ifdef CONFIG_LINUX_FALLOCATE
		case FIO_FALLOCATE_KEEP_SIZE:
			dprint(FD_FILE,
				"fallocate(FALLOC_FL_KEEP_SIZE) "
				"file %s size %llu\n", f->file_name,
				(unsigned long long) f->real_file_size);

			r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
					f->real_file_size);
			if (r != 0)
				td_verror(td, errno, "fallocate");

			break;
#endif /* CONFIG_LINUX_FALLOCATE */
		default:
			log_err("fio: unknown fallocate mode: %d\n",
				td->o.fallocate_mode);
			assert(0);
		}
	}
#endif /* CONFIG_POSIX_FALLOCATE */

	if (!new_layout)
		goto done;

	/*
	 * The size will be -1ULL when fill_device is used, so don't truncate
	 * or fallocate this file, just write it
	 */
	if (!td->o.fill_device) {
		dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
					(unsigned long long) f->real_file_size);
		if (ftruncate(f->fd, f->real_file_size) == -1) {
			if (errno != EFBIG) {
				td_verror(td, errno, "ftruncate");
				goto err;
			}
		}
	}

	b = malloc(td->o.max_bs[DDIR_WRITE]);

	left = f->real_file_size;
	while (left && !td->terminate) {
		bs = td->o.max_bs[DDIR_WRITE];
		if (bs > left)
			bs = left;

		fill_io_buffer(td, b, bs, bs);

		r = write(f->fd, b, bs);

		if (r > 0) {
			left -= r;
			continue;
		} else {
			if (r < 0) {
				int __e = errno;

				if (__e == ENOSPC) {
					if (td->o.fill_device)
						break;
					log_info("fio: ENOSPC on laying out "
						 "file, stopping\n");
					break;
				}
				td_verror(td, errno, "write");
			} else
				td_verror(td, EIO, "write");

			break;
		}
	}

	if (td->terminate) {
		dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
		unlink(f->file_name);
	} else if (td->o.create_fsync) {
		if (fsync(f->fd) < 0) {
			td_verror(td, errno, "fsync");
			goto err;
		}
	}
	if (td->o.fill_device && !td_write(td)) {
		fio_file_clear_size_known(f);
		if (td_io_get_file_size(td, f))
			goto err;
		if (f->io_size > f->real_file_size)
			f->io_size = f->real_file_size;
	}

	free(b);
done:
	return 0;
err:
	close(f->fd);
	f->fd = -1;
	if (b)
		free(b);
	return 1;
}

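/*
 * Pre-read the working area of a file into the page cache, using the
 * read-side max block size. No-op for pipe-style IO engines.
 */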
static int pre_read_file(struct thread_data *td, struct fio_file *f)
{
	int ret = 0, r, did_open = 0, old_runstate;
	unsigned long long left;
	unsigned int bs;
	char *b;

	if (td->io_ops->flags & FIO_PIPEIO)
		return 0;

	if (!fio_file_open(f)) {
		if (td->io_ops->open_file(td, f)) {
			log_err("fio: cannot pre-read, failed to open file\n");
			return 1;
		}
		did_open = 1;
	}

	old_runstate = td_bump_runstate(td, TD_PRE_READING);

	bs = td->o.max_bs[DDIR_READ];
	b = malloc(bs);
	memset(b, 0, bs);

	if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
		td_verror(td, errno, "lseek");
		log_err("fio: failed to lseek pre-read file\n");
		ret = 1;
		goto error;
	}

	left = f->io_size;

	while (left && !td->terminate) {
		if (bs > left)
			bs = left;

		r = read(f->fd, b, bs);

		if (r == (int) bs) {
			left -= bs;
			continue;
		} else {
			td_verror(td, EIO, "pre_read");
			break;
		}
	}

error:
	td_restore_runstate(td, old_runstate);

	if (did_open)
		td->io_ops->close_file(td, f);

	free(b);
	return ret;
}

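/*
 * Pick a size between file_size_low and file_size_high, aligned down to
 * the minimum block size of the job.
 */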
static unsigned long long get_rand_file_size(struct thread_data *td)
{
	unsigned long long ret, sized;
	unsigned long r;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->file_size_state);
		sized = td->o.file_size_high - td->o.file_size_low;
		ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__file_size_state);
		sized = td->o.file_size_high - td->o.file_size_low;
		ret = (unsigned long long) ((double) sized * (r / (FRAND_MAX + 1.0)));
	}

	ret += td->o.file_size_low;
	ret -= (ret % td->o.rw_min_bs);
	return ret;
}

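/*
 * Get the size of a regular file via stat(2)
 */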
static int file_size(struct thread_data *td, struct fio_file *f)
{
	struct stat st;

	if (stat(f->file_name, &st) == -1) {
		td_verror(td, errno, "stat");
		return 1;
	}

	f->real_file_size = st.st_size;
	return 0;
}

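/*
 * Get the size of a block device by (temporarily) opening it
 */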
static int bdev_size(struct thread_data *td, struct fio_file *f)
{
	unsigned long long bytes = 0;
	int r;

	if (td->io_ops->open_file(td, f)) {
		log_err("fio: failed opening blockdev %s for size check\n",
			f->file_name);
		return 1;
	}

	r = blockdev_size(f, &bytes);
	if (r) {
		td_verror(td, r, "blockdev_size");
		goto err;
	}

	if (!bytes) {
		log_err("%s: zero sized block device?\n", f->file_name);
		goto err;
	}

	f->real_file_size = bytes;
	td->io_ops->close_file(td, f);
	return 0;
err:
	td->io_ops->close_file(td, f);
	return 1;
}

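/*
 * Get the size of a character device, if the platform supports it
 */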
static int char_size(struct thread_data *td, struct fio_file *f)
{
#ifdef FIO_HAVE_CHARDEV_SIZE
	unsigned long long bytes = 0;
	int r;

	if (td->io_ops->open_file(td, f)) {
		log_err("fio: failed opening chardev %s for size check\n",
			f->file_name);
		return 1;
	}

	r = chardev_size(f, &bytes);
	if (r) {
		td_verror(td, r, "chardev_size");
		goto err;
	}

	if (!bytes) {
		log_err("%s: zero sized char device?\n", f->file_name);
		goto err;
	}

	f->real_file_size = bytes;
	td->io_ops->close_file(td, f);
	return 0;
err:
	td->io_ops->close_file(td, f);
	return 1;
#else
	f->real_file_size = -1ULL;
	return 0;
#endif
}

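/*
 * Establish ->real_file_size for a file based on its type, and verify
 * that the configured offset does not exceed it.
 */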
static int get_file_size(struct thread_data *td, struct fio_file *f)
{
	int ret = 0;

	if (fio_file_size_known(f))
		return 0;

	if (f->filetype == FIO_TYPE_FILE)
		ret = file_size(td, f);
	else if (f->filetype == FIO_TYPE_BD)
		ret = bdev_size(td, f);
	else if (f->filetype == FIO_TYPE_CHAR)
		ret = char_size(td, f);
	else
		f->real_file_size = -1;

	if (ret)
		return ret;

	if (f->file_offset > f->real_file_size) {
		log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
					(unsigned long long) f->file_offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	fio_file_set_size_known(f);
	return 0;
}

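/*
 * Drop the given range of a file from the page cache. Failure to do so
 * is not treated as fatal, only logged.
 */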
static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
				   unsigned long long off,
				   unsigned long long len)
{
	int ret = 0;

	if (len == -1ULL)
		len = f->io_size;
	if (off == -1ULL)
		off = f->file_offset;

	if (len == -1ULL || off == -1ULL)
		return 0;

	dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
								len);

	if (f->mmap_ptr) {
		ret = posix_madvise(f->mmap_ptr, f->mmap_sz, POSIX_MADV_DONTNEED);
#ifdef FIO_MADV_FREE
		if (f->filetype == FIO_TYPE_BD)
			(void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
#endif
	} else if (f->filetype == FIO_TYPE_FILE) {
		ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
	} else if (f->filetype == FIO_TYPE_BD) {
		ret = blockdev_invalidate_cache(f);
		if (ret < 0 && errno == EACCES && geteuid()) {
			if (!root_warn) {
				log_err("fio: only root may flush block "
					"devices. Cache flush bypassed!\n");
				root_warn = 1;
			}
			ret = 0;
		}
	} else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
		ret = 0;

	/*
	 * Cache flushing isn't a fatal condition, and we know it will
	 * happen on some platforms where we don't have the proper
	 * function to flush e.g. block device caches. So just warn and
	 * continue on our way.
	 */
	if (ret) {
		log_info("fio: cache invalidation of %s failed: %s\n",
				f->file_name, strerror(errno));
		ret = 0;
	}

	return 0;
}

int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
{
	if (!fio_file_open(f))
		return 0;

	return __file_invalidate_cache(td, f, -1ULL, -1ULL);
}

int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
{
	int ret = 0;

	dprint(FD_FILE, "fd close %s\n", f->file_name);

	remove_file_hash(f);

	if (close(f->fd) < 0)
		ret = errno;

	f->fd = -1;

	if (f->shadow_fd != -1) {
		close(f->shadow_fd);
		f->shadow_fd = -1;
	}

	f->engine_data = 0;
	return ret;
}

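/*
 * Open a file, reusing the lock of an already hashed instance of the same
 * name if one exists. Returns whether the file was found in the hash.
 */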
int file_lookup_open(struct fio_file *f, int flags)
{
	struct fio_file *__f;
	int from_hash;

	__f = lookup_file_hash(f->file_name);
	if (__f) {
		dprint(FD_FILE, "found file in hash %s\n", f->file_name);
		/*
		 * racy, need the __f->lock locked
		 */
		f->lock = __f->lock;
		from_hash = 1;
	} else {
		dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
		from_hash = 0;
	}

#ifdef WIN32
	flags |= _O_BINARY;
#endif

	f->fd = open(f->file_name, flags, 0600);
	return from_hash;
}

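/*
 * Close any stashed shadow descriptors, returning how many were closed.
 * Used to free up descriptors when open() fails with EMFILE.
 */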
static int file_close_shadow_fds(struct thread_data *td)
{
	struct fio_file *f;
	int num_closed = 0;
	unsigned int i;

	for_each_file(td, f, i) {
		if (f->shadow_fd == -1)
			continue;

		close(f->shadow_fd);
		f->shadow_fd = -1;
		num_closed++;
	}

	return num_closed;
}

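/*
 * Default open_file hook: build the open(2) flags from the job options
 * (direct, atomic, sync, create) and the data direction, then open the
 * file, retrying without O_NOATIME or after closing shadow fds if needed.
 */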
int generic_open_file(struct thread_data *td, struct fio_file *f)
{
	int is_std = 0;
	int flags = 0;
	int from_hash = 0;

	dprint(FD_FILE, "fd open %s\n", f->file_name);

	if (td_trim(td) && f->filetype != FIO_TYPE_BD) {
		log_err("fio: trim only applies to block device\n");
		return 1;
	}

	if (!strcmp(f->file_name, "-")) {
		if (td_rw(td)) {
			log_err("fio: can't read/write to stdin/out\n");
			return 1;
		}
		is_std = 1;

		/*
		 * move output logging to stderr, if we are writing to stdout
		 */
		if (td_write(td))
			f_out = stderr;
	}

	if (td_trim(td))
		goto skip_flags;
	if (td->o.odirect)
		flags |= OS_O_DIRECT;
	if (td->o.oatomic) {
		if (!FIO_O_ATOMIC) {
			td_verror(td, EINVAL, "OS does not support atomic IO");
			return 1;
		}
		flags |= OS_O_DIRECT | FIO_O_ATOMIC;
	}
	if (td->o.sync_io)
		flags |= O_SYNC;
	if (td->o.create_on_open)
		flags |= O_CREAT;
skip_flags:
	if (f->filetype != FIO_TYPE_FILE)
		flags |= FIO_O_NOATIME;

open_again:
	if (td_write(td)) {
		if (!read_only)
			flags |= O_RDWR;

		if (f->filetype == FIO_TYPE_FILE)
			flags |= O_CREAT;

		if (is_std)
			f->fd = dup(STDOUT_FILENO);
		else
			from_hash = file_lookup_open(f, flags);
	} else if (td_read(td)) {
		if (f->filetype == FIO_TYPE_CHAR && !read_only)
			flags |= O_RDWR;
		else
			flags |= O_RDONLY;

		if (is_std)
			f->fd = dup(STDIN_FILENO);
		else
			from_hash = file_lookup_open(f, flags);
	} else { /* td trim */
		flags |= O_RDWR;
		from_hash = file_lookup_open(f, flags);
	}

	if (f->fd == -1) {
		char buf[FIO_VERROR_SIZE];
		int __e = errno;

		if (__e == EPERM && (flags & FIO_O_NOATIME)) {
			flags &= ~FIO_O_NOATIME;
			goto open_again;
		}
		if (__e == EMFILE && file_close_shadow_fds(td))
			goto open_again;

		snprintf(buf, sizeof(buf), "open(%s)", f->file_name);

		if (__e == EINVAL && (flags & OS_O_DIRECT)) {
			log_err("fio: looks like your file system does not "
				"support direct=1/buffered=0\n");
		}

		td_verror(td, __e, buf);
	}

	if (!from_hash && f->fd != -1) {
		if (add_file_hash(f)) {
			int fio_unused ret;

			/*
			 * Stash away descriptor for later close. This is to
			 * work-around a "feature" on Linux, where a close of
			 * an fd that has been opened for write will trigger
			 * udev to call blkid to check partitions, fs id, etc.
			 * That pollutes the device cache, which can slow down
			 * unbuffered accesses.
			 */
			if (f->shadow_fd == -1)
				f->shadow_fd = f->fd;
			else {
				/*
				 * OK to ignore, we haven't done anything
				 * with it
				 */
				ret = generic_close_file(td, f);
			}
			goto open_again;
		}
	}

	return 0;
}

int generic_get_file_size(struct thread_data *td, struct fio_file *f)
{
	return get_file_size(td, f);
}

/*
 * open/close all files, so that ->real_file_size gets set
 */
static int get_file_sizes(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;
	int err = 0;

	for_each_file(td, f, i) {
		dprint(FD_FILE, "get file size for %p/%d/%p\n", f, i,
								f->file_name);

		if (td_io_get_file_size(td, f)) {
			if (td->error != ENOENT) {
				log_err("%s\n", td->verror);
				err = 1;
			}
			clear_error(td);
		}

		if (f->real_file_size == -1ULL && td->o.size)
			f->real_file_size = td->o.size / td->o.nr_files;
	}

	return err;
}

struct fio_mount {
	struct flist_head list;
	const char *base;
	char __base[256];
	unsigned int key;
};

/*
 * Sum the free space of each unique mount point that the files reside
 * on. Block and character devices contribute their device size instead.
 */
static unsigned long long get_fs_free_counts(struct thread_data *td)
{
	struct flist_head *n, *tmp;
	unsigned long long ret = 0;
	struct fio_mount *fm;
	FLIST_HEAD(list);
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i) {
		struct stat sb;
		char buf[256];

		if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
			if (f->real_file_size != -1ULL)
				ret += f->real_file_size;
			continue;
		} else if (f->filetype != FIO_TYPE_FILE)
			continue;

		buf[255] = '\0';
		strncpy(buf, f->file_name, 255);

		if (stat(buf, &sb) < 0) {
			if (errno != ENOENT)
				break;
			strcpy(buf, ".");
			if (stat(buf, &sb) < 0)
				break;
		}

		fm = NULL;
		flist_for_each(n, &list) {
			fm = flist_entry(n, struct fio_mount, list);
			if (fm->key == sb.st_dev)
				break;

			fm = NULL;
		}

		if (fm)
			continue;

		fm = calloc(1, sizeof(*fm));
		strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
		fm->base = basename(fm->__base);
		fm->key = sb.st_dev;
		flist_add(&fm->list, &list);
	}

	flist_for_each_safe(n, tmp, &list) {
		unsigned long long sz;

		fm = flist_entry(n, struct fio_mount, list);
		flist_del(&fm->list);

		sz = get_fs_size(fm->base);
		if (sz && sz != -1ULL)
			ret += sz;

		free(fm);
	}

	return ret;
}

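/*
 * Compute the starting offset inside a file for this thread, honoring
 * file_append and offset_increment.
 */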
uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
{
	struct thread_options *o = &td->o;

	if (o->file_append && f->filetype == FIO_TYPE_FILE)
		return f->real_file_size;

	return td->o.start_offset +
		(td->thread_number - 1) * td->o.offset_increment;
}

/*
 * Open the files and set up file sizes, creating files if necessary.
 */
int setup_files(struct thread_data *td)
{
	unsigned long long total_size, extend_size;
	struct thread_options *o = &td->o;
	struct fio_file *f;
	unsigned int i, nr_fs_extra = 0;
	int err = 0, need_extend;
	int old_state;
	const unsigned int bs = td_min_bs(td);
	uint64_t fs = 0;

	dprint(FD_FILE, "setup files\n");

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	if (o->read_iolog_file)
		goto done;

	/*
	 * if ioengine defines a setup() method, it's responsible for
	 * opening the files and setting f->real_file_size to indicate
	 * the valid range for that file.
	 */
	if (td->io_ops->setup)
		err = td->io_ops->setup(td);
	else
		err = get_file_sizes(td);

	if (err)
		goto err_out;

	/*
	 * check sizes. if the files/devices do not exist and the size
	 * isn't passed to fio, abort.
	 */
	total_size = 0;
	for_each_file(td, f, i) {
		if (f->real_file_size == -1ULL)
			total_size = -1ULL;
		else
			total_size += f->real_file_size;
	}

	if (o->fill_device)
		td->fill_device_size = get_fs_free_counts(td);

	/*
	 * device/file sizes are zero and no size given, punt
	 */
	if ((!total_size || total_size == -1ULL) && !o->size &&
	    !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
	    !(o->nr_files && (o->file_size_low || o->file_size_high))) {
		log_err("%s: you need to specify size=\n", o->name);
		td_verror(td, EINVAL, "total_file_size");
		goto err_out;
	}

	/*
	 * Calculate per-file size and potential extra size for the
	 * first files, if needed.
	 */
	if (!o->file_size_low && o->nr_files) {
		uint64_t all_fs;

		fs = o->size / o->nr_files;
		all_fs = fs * o->nr_files;

		if (all_fs < o->size)
			nr_fs_extra = (o->size - all_fs) / bs;
	}

	/*
	 * now file sizes are known, so we can set ->io_size. if size= is
	 * not given, ->io_size is just equal to ->real_file_size. if size
	 * is given, ->io_size is size / nr_files.
	 */
	extend_size = total_size = 0;
	need_extend = 0;
	for_each_file(td, f, i) {
		f->file_offset = get_start_offset(td, f);

		if (!o->file_size_low) {
			/*
			 * no file size range given, file size is equal to
			 * total size divided by number of files. If that is
			 * zero, set it to the real file size. If the size
			 * doesn't divide nicely with the min blocksize,
			 * make the first files bigger.
			 */
			f->io_size = fs;
			if (nr_fs_extra) {
				nr_fs_extra--;
				f->io_size += bs;
			}

			if (!f->io_size)
				f->io_size = f->real_file_size - f->file_offset;
		} else if (f->real_file_size < o->file_size_low ||
			   f->real_file_size > o->file_size_high) {
			if (f->file_offset > o->file_size_low)
				goto err_offset;
			/*
			 * file size given. if it's fixed, use that. if it's a
			 * range, generate a random size in-between.
			 */
			if (o->file_size_low == o->file_size_high)
				f->io_size = o->file_size_low - f->file_offset;
			else {
				f->io_size = get_rand_file_size(td)
						- f->file_offset;
			}
		} else
			f->io_size = f->real_file_size - f->file_offset;

		if (f->io_size == -1ULL)
			total_size = -1ULL;
		else {
			if (o->size_percent)
				f->io_size = (f->io_size * o->size_percent) / 100;
			total_size += f->io_size;
		}

		if (f->filetype == FIO_TYPE_FILE &&
		    (f->io_size + f->file_offset) > f->real_file_size &&
		    !(td->io_ops->flags & FIO_DISKLESSIO)) {
			if (!o->create_on_open) {
				need_extend++;
				extend_size += (f->io_size + f->file_offset);
			} else
				f->real_file_size = f->io_size + f->file_offset;
			fio_file_set_extend(f);
		}
	}

	if (!o->size || o->size > total_size)
		o->size = total_size;

	if (o->size < td_min_bs(td)) {
		log_err("fio: blocksize too large for data set\n");
		goto err_out;
	}

	/*
	 * See if we need to extend some files
	 */
	if (need_extend) {
		temp_stall_ts = 1;
		if (output_format == FIO_OUTPUT_NORMAL)
			log_info("%s: Laying out IO file(s) (%u file(s) /"
				 " %lluMB)\n", o->name, need_extend,
					extend_size >> 20);

		for_each_file(td, f, i) {
			unsigned long long old_len = -1ULL, extend_len = -1ULL;

			if (!fio_file_extend(f))
				continue;

			assert(f->filetype == FIO_TYPE_FILE);
			fio_file_clear_extend(f);
			if (!o->fill_device) {
				old_len = f->real_file_size;
				extend_len = f->io_size + f->file_offset -
						old_len;
			}
			f->real_file_size = (f->io_size + f->file_offset);
			err = extend_file(td, f);
			if (err)
				break;

			err = __file_invalidate_cache(td, f, old_len,
								extend_len);

			/*
			 * Shut up static checker
			 */
			if (f->fd != -1)
				close(f->fd);

			f->fd = -1;
			if (err)
				break;
		}
		temp_stall_ts = 0;
	}

	if (err)
		goto err_out;

	if (!o->zone_size)
		o->zone_size = o->size;

	/*
	 * iolog already set the total io size, if we read back
	 * stored entries.
	 */
	if (!o->read_iolog_file) {
		if (o->io_limit)
			td->total_io_size = o->io_limit * o->loops;
		else
			td->total_io_size = o->size * o->loops;
	}

done:
	if (o->create_only)
		td->done = 1;

	td_restore_runstate(td, old_state);
	return 0;
err_offset:
	log_err("%s: you need to specify valid offset=\n", o->name);
err_out:
	td_restore_runstate(td, old_state);
	return 1;
}

int pre_read_files(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	dprint(FD_FILE, "pre_read files\n");

	for_each_file(td, f, i) {
		pre_read_file(td, f);
	}

	return 1;
}

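/*
 * Initialize the zipf/pareto state for a file, with one range per
 * minimum-sized block.
 */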
static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
{
	unsigned int range_size, seed;
	unsigned long nranges;
	uint64_t file_size;

	range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
	file_size = min(f->real_file_size, f->io_size);

	nranges = (file_size + range_size - 1) / range_size;

	seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
	if (!td->o.rand_repeatable)
		seed = td->rand_seeds[4];

	if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
		zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
	else
		pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);

	return 1;
}

static int init_rand_distribution(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;
	int state;

	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
		return 0;

	state = td_bump_runstate(td, TD_SETTING_UP);

	for_each_file(td, f, i)
		__init_rand_distribution(td, f);

	td_restore_runstate(td, state);

	return 1;
}

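/*
 * Set up per-file random block tracking: an LFSR, an axmap, or nothing,
 * depending on the random generator and norandommap settings.
 */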
int init_random_map(struct thread_data *td)
{
	unsigned long long blocks;
	struct fio_file *f;
	unsigned int i;

	if (init_rand_distribution(td))
		return 0;
	if (!td_random(td))
		return 0;

	for_each_file(td, f, i) {
		uint64_t file_size = min(f->real_file_size, f->io_size);

		blocks = file_size / (unsigned long long) td->o.rw_min_bs;

		if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
			unsigned long seed;

			seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];

			if (!lfsr_init(&f->lfsr, blocks, seed, 0))
				continue;
		} else if (!td->o.norandommap) {
			f->io_axmap = axmap_new(blocks);
			if (f->io_axmap)
				continue;
		} else if (td->o.norandommap)
			continue;

		if (!td->o.softrandommap) {
			log_err("fio: failed allocating random map. If running"
				" a large number of jobs, try the 'norandommap'"
				" option or set 'softrandommap'. Or give"
				" a larger --alloc-size to fio.\n");
			return 1;
		}

		log_info("fio: file %s failed allocating random map. Running "
			 "job without.\n", f->file_name);
	}

	return 0;
}

void close_files(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i) {
		if (fio_file_open(f))
			td_io_close_file(td, f);
	}
}

void close_and_free_files(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	dprint(FD_FILE, "close files\n");

	for_each_file(td, f, i) {
		if (fio_file_open(f))
			td_io_close_file(td, f);

		remove_file_hash(f);

		if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
			dprint(FD_FILE, "free unlink %s\n", f->file_name);
			unlink(f->file_name);
		}

		sfree(f->file_name);
		f->file_name = NULL;
		axmap_free(f->io_axmap);
		f->io_axmap = NULL;
		sfree(f);
	}

	td->o.filename = NULL;
	free(td->files);
	free(td->file_locks);
	td->files_index = 0;
	td->files = NULL;
	td->file_locks = NULL;
	td->o.file_lock_mode = FILE_LOCK_NONE;
	td->o.nr_files = 0;
}

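/*
 * Classify a file as a regular file, block device, character device,
 * or pipe based on its name and stat(2) result.
 */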
static void get_file_type(struct fio_file *f)
{
	struct stat sb;

	if (!strcmp(f->file_name, "-"))
		f->filetype = FIO_TYPE_PIPE;
	else
		f->filetype = FIO_TYPE_FILE;

	/*
	 * \\.\ is the device namespace in Windows, where every file
	 * is a block device
	 */
	if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
		f->filetype = FIO_TYPE_BD;

	if (!stat(f->file_name, &sb)) {
		if (S_ISBLK(sb.st_mode))
			f->filetype = FIO_TYPE_BD;
		else if (S_ISCHR(sb.st_mode))
			f->filetype = FIO_TYPE_CHAR;
		else if (S_ISFIFO(sb.st_mode))
			f->filetype = FIO_TYPE_PIPE;
	}
}

static int __is_already_allocated(const char *fname)
{
	struct flist_head *entry;
	char *filename;

	if (flist_empty(&filename_list))
		return 0;

	flist_for_each(entry, &filename_list) {
		filename = flist_entry(entry, struct file_name, list)->filename;

		if (strcmp(filename, fname) == 0)
			return 1;
	}

	return 0;
}

static int is_already_allocated(const char *fname)
{
	int ret;

	fio_file_hash_lock();
	ret = __is_already_allocated(fname);
	fio_file_hash_unlock();
	return ret;
}

static void set_already_allocated(const char *fname)
{
	struct file_name *fn;

	fn = malloc(sizeof(struct file_name));
	fn->filename = strdup(fname);

	fio_file_hash_lock();
	if (!__is_already_allocated(fname)) {
		flist_add_tail(&fn->list, &filename_list);
		fn = NULL;
	}
	fio_file_hash_unlock();

	if (fn) {
		free(fn->filename);
		free(fn);
	}
}

static void free_already_allocated(void)
{
	struct flist_head *entry, *tmp;
	struct file_name *fn;

	if (flist_empty(&filename_list))
		return;

	fio_file_hash_lock();
	flist_for_each_safe(entry, tmp, &filename_list) {
		fn = flist_entry(entry, struct file_name, list);
		free(fn->filename);
		flist_del(&fn->list);
		free(fn);
	}

	fio_file_hash_unlock();
}

static struct fio_file *alloc_new_file(struct thread_data *td)
{
	struct fio_file *f;

	f = smalloc(sizeof(*f));
	if (!f) {
		log_err("fio: smalloc OOM\n");
		assert(0);
		return NULL;
	}

	f->fd = -1;
	f->shadow_fd = -1;
	fio_file_reset(td, f);
	return f;
}

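/*
 * Add a file to the job, growing the file array if needed. Returns the
 * index the file was added at.
 */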
int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
{
	int cur_files = td->files_index;
	char file_name[PATH_MAX];
	struct fio_file *f;
	int len = 0;

	dprint(FD_FILE, "add file %s\n", fname);

	if (td->o.directory)
		len = set_name_idx(file_name, td->o.directory, numjob);

	sprintf(file_name + len, "%s", fname);

	/* clean cloned siblings using existing files */
	if (numjob && is_already_allocated(file_name))
		return 0;

	f = alloc_new_file(td);

	if (td->files_size <= td->files_index) {
		unsigned int new_size = td->o.nr_files + 1;

		dprint(FD_FILE, "resize file array to %d files\n", new_size);

		td->files = realloc(td->files, new_size * sizeof(f));
		if (td->files == NULL) {
			log_err("fio: realloc OOM\n");
			assert(0);
		}
		if (td->o.file_lock_mode != FILE_LOCK_NONE) {
			td->file_locks = realloc(td->file_locks, new_size);
			if (!td->file_locks) {
				log_err("fio: realloc OOM\n");
				assert(0);
			}
			td->file_locks[cur_files] = FILE_LOCK_NONE;
		}
		td->files_size = new_size;
	}
	td->files[cur_files] = f;
	f->fileno = cur_files;

	/*
	 * init function, io engine may not be loaded yet
	 */
	if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
		f->real_file_size = -1ULL;

	f->file_name = smalloc_strdup(file_name);
	if (!f->file_name) {
		log_err("fio: smalloc OOM\n");
		assert(0);
	}

	get_file_type(f);

	switch (td->o.file_lock_mode) {
	case FILE_LOCK_NONE:
		break;
	case FILE_LOCK_READWRITE:
		f->rwlock = fio_rwlock_init();
		break;
	case FILE_LOCK_EXCLUSIVE:
		f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
		break;
	default:
		log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
		assert(0);
	}

	td->files_index++;
	if (f->filetype == FIO_TYPE_FILE)
		td->nr_normal_files++;

	set_already_allocated(file_name);

	if (inc)
		td->o.nr_files++;

	dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
							cur_files);

	return cur_files;
}

int add_file_exclusive(struct thread_data *td, const char *fname)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i) {
		if (!strcmp(f->file_name, fname))
			return i;
	}

	return add_file(td, fname, 0, 1);
}

void get_file(struct fio_file *f)
{
	dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
	assert(fio_file_open(f));
	f->references++;
}

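/*
 * Drop a reference to a file, closing it (and optionally fsyncing it)
 * when the last reference goes away.
 */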
int put_file(struct thread_data *td, struct fio_file *f)
{
	int f_ret = 0, ret = 0;

	dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);

	if (!fio_file_open(f)) {
		assert(f->fd == -1);
		return 0;
	}

	assert(f->references);
	if (--f->references)
		return 0;

	if (should_fsync(td) && td->o.fsync_on_close) {
		f_ret = fsync(f->fd);
		if (f_ret < 0)
			f_ret = errno;
	}

	if (td->io_ops->close_file)
		ret = td->io_ops->close_file(td, f);

	if (!ret)
		ret = f_ret;

	td->nr_open_files--;
	fio_file_clear_open(f);
	assert(f->fd == -1);
	return ret;
}

void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
{
	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
		return;

	if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
		if (ddir == DDIR_READ)
			fio_rwlock_read(f->rwlock);
		else
			fio_rwlock_write(f->rwlock);
	} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
		fio_mutex_down(f->lock);

	td->file_locks[f->fileno] = td->o.file_lock_mode;
}

void unlock_file(struct thread_data *td, struct fio_file *f)
{
	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
		return;

	if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
		fio_rwlock_unlock(f->rwlock);
	else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
		fio_mutex_up(f->lock);

	td->file_locks[f->fileno] = FILE_LOCK_NONE;
}

void unlock_file_all(struct thread_data *td, struct fio_file *f)
{
	if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
		return;
	if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
		unlock_file(td, f);
}

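/*
 * Recursively walk a directory, adding every regular file found to the job.
 */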
static int recurse_dir(struct thread_data *td, const char *dirname)
{
	struct dirent *dir;
	int ret = 0;
	DIR *D;

	D = opendir(dirname);
	if (!D) {
		char buf[FIO_VERROR_SIZE];

		snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
		td_verror(td, errno, buf);
		return 1;
	}

	while ((dir = readdir(D)) != NULL) {
		char full_path[PATH_MAX];
		struct stat sb;

		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
			continue;

		sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);

		if (lstat(full_path, &sb) == -1) {
			if (errno != ENOENT) {
				td_verror(td, errno, "stat");
				ret = 1;
				break;
			}
			/*
			 * Entry vanished between readdir and lstat, skip it
			 * rather than inspecting an uninitialized stat buf.
			 */
			continue;
		}

		if (S_ISREG(sb.st_mode)) {
			add_file(td, full_path, 0, 1);
			continue;
		}
		if (!S_ISDIR(sb.st_mode))
			continue;

		ret = recurse_dir(td, full_path);
		if (ret)
			break;
	}

	closedir(D);
	return ret;
}

int add_dir_files(struct thread_data *td, const char *path)
{
	int ret = recurse_dir(td, path);

	if (!ret)
		log_info("fio: opendir added %d files\n", td->o.nr_files);

	return ret;
}

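/*
 * Clone the file list of another thread, sharing locks but duplicating
 * the file names.
 */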
void dup_files(struct thread_data *td, struct thread_data *org)
{
	struct fio_file *f;
	unsigned int i;

	dprint(FD_FILE, "dup files: %d\n", org->files_index);

	if (!org->files)
		return;

	td->files = malloc(org->files_index * sizeof(f));

	if (td->o.file_lock_mode != FILE_LOCK_NONE)
		td->file_locks = malloc(org->files_index);

	for_each_file(org, f, i) {
		struct fio_file *__f;

		__f = alloc_new_file(td);

		if (f->file_name) {
			__f->file_name = smalloc_strdup(f->file_name);
			if (!__f->file_name) {
				log_err("fio: smalloc OOM\n");
				assert(0);
			}

			__f->filetype = f->filetype;
		}

		if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
			__f->lock = f->lock;
		else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
			__f->rwlock = f->rwlock;

		td->files[i] = __f;
	}
}

/*
 * Returns the index that matches the filename, or -1 if not there
 */
int get_fileno(struct thread_data *td, const char *fname)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i)
		if (!strcmp(f->file_name, fname))
			return i;

	return -1;
}

/*
 * For log usage, where we add/open/close files automatically
 */
void free_release_files(struct thread_data *td)
{
	close_files(td);
	td->files_index = 0;
	td->nr_normal_files = 0;
}

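/*
 * Reset per-file IO state (position, random map, LFSR) before a new run.
 */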
void fio_file_reset(struct thread_data *td, struct fio_file *f)
{
	f->last_pos = f->file_offset;
	f->last_start = -1ULL;
	if (f->io_axmap)
		axmap_reset(f->io_axmap);
	if (td->o.random_generator == FIO_RAND_GEN_LFSR)
		lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
}

int fio_files_done(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i)
		if (!fio_file_done(f))
			return 0;

	return 1;
}

/* free memory used in initialization phase only */
void filesetup_mem_free(void)
{
	free_already_allocated();
}