1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27/*
28 * List entry for filename_list
29 */
30struct file_name {
31	struct flist_head list;
32	char *filename;
33};
34
35static inline void clear_error(struct thread_data *td)
36{
37	td->error = 0;
38	td->verror[0] = '\0';
39}
40
41/*
42 * Leaves f->fd open on success, caller must close
43 */
44static int extend_file(struct thread_data *td, struct fio_file *f)
45{
46	int r, new_layout = 0, unlink_file = 0, flags;
47	unsigned long long left;
48	unsigned int bs;
49	char *b = NULL;
50
51	if (read_only) {
52		log_err("fio: refusing extend of file due to read-only\n");
53		return 0;
54	}
55
56	/*
	 * check if we need to lay the file out completely again. fio
58	 * does that for operations involving reads, or for writes
59	 * where overwrite is set
60	 */
61	if (td_read(td) ||
62	   (td_write(td) && td->o.overwrite && !td->o.file_append) ||
63	    (td_write(td) && td_ioengine_flagged(td, FIO_NOEXTEND)))
64		new_layout = 1;
65	if (td_write(td) && !td->o.overwrite && !td->o.file_append)
66		unlink_file = 1;
67
68	if (unlink_file || new_layout) {
69		int ret;
70
71		dprint(FD_FILE, "layout unlink %s\n", f->file_name);
72
73		ret = td_io_unlink_file(td, f);
74		if (ret != 0 && ret != ENOENT) {
			td_verror(td, ret, "unlink");
76			return 1;
77		}
78	}
79
80	flags = O_WRONLY;
81	if (td->o.allow_create)
82		flags |= O_CREAT;
83	if (new_layout)
84		flags |= O_TRUNC;
85
86#ifdef WIN32
87	flags |= _O_BINARY;
88#endif
89
90	dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
91	f->fd = open(f->file_name, flags, 0644);
92	if (f->fd < 0) {
93		int err = errno;
94
95		if (err == ENOENT && !td->o.allow_create)
96			log_err("fio: file creation disallowed by "
97					"allow_file_create=0\n");
98		else
99			td_verror(td, err, "open");
100		return 1;
101	}
102
103#ifdef CONFIG_POSIX_FALLOCATE
104	if (!td->o.fill_device) {
105		switch (td->o.fallocate_mode) {
106		case FIO_FALLOCATE_NONE:
107			break;
108		case FIO_FALLOCATE_POSIX:
109			dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
110				 f->file_name,
111				 (unsigned long long) f->real_file_size);
112
113			r = posix_fallocate(f->fd, 0, f->real_file_size);
114			if (r > 0) {
115				log_err("fio: posix_fallocate fails: %s\n",
116						strerror(r));
117			}
118			break;
119#ifdef CONFIG_LINUX_FALLOCATE
120		case FIO_FALLOCATE_KEEP_SIZE:
121			dprint(FD_FILE,
122				"fallocate(FALLOC_FL_KEEP_SIZE) "
123				"file %s size %llu\n", f->file_name,
124				(unsigned long long) f->real_file_size);
125
126			r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
127					f->real_file_size);
128			if (r != 0)
129				td_verror(td, errno, "fallocate");
130
131			break;
132#endif /* CONFIG_LINUX_FALLOCATE */
133		default:
134			log_err("fio: unknown fallocate mode: %d\n",
135				td->o.fallocate_mode);
136			assert(0);
137		}
138	}
139#endif /* CONFIG_POSIX_FALLOCATE */
140
141	/*
142	 * If our jobs don't require regular files initially, we're done.
143	 */
144	if (!new_layout)
145		goto done;
146
147	/*
148	 * The size will be -1ULL when fill_device is used, so don't truncate
149	 * or fallocate this file, just write it
150	 */
151	if (!td->o.fill_device) {
152		dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
153					(unsigned long long) f->real_file_size);
154		if (ftruncate(f->fd, f->real_file_size) == -1) {
155			if (errno != EFBIG) {
156				td_verror(td, errno, "ftruncate");
157				goto err;
158			}
159		}
160	}
161
162	left = f->real_file_size;
163	bs = td->o.max_bs[DDIR_WRITE];
164	if (bs > left)
165		bs = left;
166
167	b = malloc(bs);
168	if (!b) {
169		td_verror(td, errno, "malloc");
170		goto err;
171	}
172
173	while (left && !td->terminate) {
174		if (bs > left)
175			bs = left;
176
177		fill_io_buffer(td, b, bs, bs);
178
179		r = write(f->fd, b, bs);
180
181		if (r > 0) {
182			left -= r;
183			continue;
184		} else {
185			if (r < 0) {
186				int __e = errno;
187
188				if (__e == ENOSPC) {
189					if (td->o.fill_device)
190						break;
191					log_info("fio: ENOSPC on laying out "
192						 "file, stopping\n");
193					break;
194				}
				td_verror(td, __e, "write");
196			} else
197				td_verror(td, EIO, "write");
198
199			break;
200		}
201	}
202
203	if (td->terminate) {
204		dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
205		td_io_unlink_file(td, f);
206	} else if (td->o.create_fsync) {
207		if (fsync(f->fd) < 0) {
208			td_verror(td, errno, "fsync");
209			goto err;
210		}
211	}
212	if (td->o.fill_device && !td_write(td)) {
213		fio_file_clear_size_known(f);
214		if (td_io_get_file_size(td, f))
215			goto err;
216		if (f->io_size > f->real_file_size)
217			f->io_size = f->real_file_size;
218	}
219
220	free(b);
221done:
222	return 0;
223err:
224	close(f->fd);
225	f->fd = -1;
226	if (b)
227		free(b);
228	return 1;
229}
230
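/*
 * Read the file once, sequentially, so its contents end up in the page
 * cache before the job starts. Opens the file if it isn't open yet and
 * restores the previous runstate when done.
 */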
231static int pre_read_file(struct thread_data *td, struct fio_file *f)
232{
233	int ret = 0, r, did_open = 0, old_runstate;
234	unsigned long long left;
235	unsigned int bs;
236	char *b;
237
238	if (td_ioengine_flagged(td, FIO_PIPEIO) ||
239	    td_ioengine_flagged(td, FIO_NOIO))
240		return 0;
241
242	if (f->filetype == FIO_TYPE_CHAR)
243		return 0;
244
245	if (!fio_file_open(f)) {
246		if (td->io_ops->open_file(td, f)) {
247			log_err("fio: cannot pre-read, failed to open file\n");
248			return 1;
249		}
250		did_open = 1;
251	}
252
253	old_runstate = td_bump_runstate(td, TD_PRE_READING);
254
255	left = f->io_size;
256	bs = td->o.max_bs[DDIR_READ];
257	if (bs > left)
258		bs = left;
259
260	b = malloc(bs);
261	if (!b) {
262		td_verror(td, errno, "malloc");
263		ret = 1;
264		goto error;
265	}
266	memset(b, 0, bs);
267
268	if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
269		td_verror(td, errno, "lseek");
270		log_err("fio: failed to lseek pre-read file\n");
271		ret = 1;
272		goto error;
273	}
274
275	while (left && !td->terminate) {
276		if (bs > left)
277			bs = left;
278
279		r = read(f->fd, b, bs);
280
281		if (r == (int) bs) {
282			left -= bs;
283			continue;
284		} else {
285			td_verror(td, EIO, "pre_read");
286			break;
287		}
288	}
289
290error:
291	td_restore_runstate(td, old_runstate);
292
293	if (did_open)
294		td->io_ops->close_file(td, f);
295
296	free(b);
297	return ret;
298}
299
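/*
 * Pick a random file size between file_size_low and file_size_high,
 * rounded down to a multiple of the minimum block size.
 */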
300unsigned long long get_rand_file_size(struct thread_data *td)
301{
302	unsigned long long ret, sized;
303	uint64_t frand_max;
304	unsigned long r;
305
306	frand_max = rand_max(&td->file_size_state);
307	r = __rand(&td->file_size_state);
308	sized = td->o.file_size_high - td->o.file_size_low;
309	ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
310	ret += td->o.file_size_low;
311	ret -= (ret % td->o.rw_min_bs);
312	return ret;
313}
314
315static int file_size(struct thread_data *td, struct fio_file *f)
316{
317	struct stat st;
318
319	if (stat(f->file_name, &st) == -1) {
		td_verror(td, errno, "stat");
321		return 1;
322	}
323
324	f->real_file_size = st.st_size;
325	return 0;
326}
327
328static int bdev_size(struct thread_data *td, struct fio_file *f)
329{
330	unsigned long long bytes = 0;
331	int r;
332
333	if (td->io_ops->open_file(td, f)) {
334		log_err("fio: failed opening blockdev %s for size check\n",
335			f->file_name);
336		return 1;
337	}
338
339	r = blockdev_size(f, &bytes);
340	if (r) {
341		td_verror(td, r, "blockdev_size");
342		goto err;
343	}
344
345	if (!bytes) {
346		log_err("%s: zero sized block device?\n", f->file_name);
347		goto err;
348	}
349
350	f->real_file_size = bytes;
351	td->io_ops->close_file(td, f);
352	return 0;
353err:
354	td->io_ops->close_file(td, f);
355	return 1;
356}
357
358static int char_size(struct thread_data *td, struct fio_file *f)
359{
360#ifdef FIO_HAVE_CHARDEV_SIZE
361	unsigned long long bytes = 0;
362	int r;
363
364	if (td->io_ops->open_file(td, f)) {
365		log_err("fio: failed opening chardev %s for size check\n",
366			f->file_name);
367		return 1;
368	}
369
370	r = chardev_size(f, &bytes);
371	if (r) {
372		td_verror(td, r, "chardev_size");
373		goto err;
374	}
375
376	if (!bytes) {
377		log_err("%s: zero sized char device?\n", f->file_name);
378		goto err;
379	}
380
381	f->real_file_size = bytes;
382	td->io_ops->close_file(td, f);
383	return 0;
384err:
385	td->io_ops->close_file(td, f);
386	return 1;
387#else
388	f->real_file_size = -1ULL;
389	return 0;
390#endif
391}
392
393static int get_file_size(struct thread_data *td, struct fio_file *f)
394{
395	int ret = 0;
396
397	if (fio_file_size_known(f))
398		return 0;
399
400	if (f->filetype == FIO_TYPE_FILE)
401		ret = file_size(td, f);
402	else if (f->filetype == FIO_TYPE_BLOCK)
403		ret = bdev_size(td, f);
404	else if (f->filetype == FIO_TYPE_CHAR)
405		ret = char_size(td, f);
406	else
407		f->real_file_size = -1ULL;
408
409	/*
	 * Leave ->real_file_size at 0 on failure, since a zero size may be
	 * the expected initial state for regular files that will be laid
	 * out later.
412	 */
413	if (ret)
414		return ret;
415
416	/*
	 * If ->real_file_size is -1, the "offset extends end" check below
	 * would always trigger, which makes no sense for an unknown size,
	 * so report the failure and return here instead.
420	 */
421	if (f->real_file_size == -1ULL) {
422		log_info("%s: failed to get file size of %s\n", td->o.name,
423					f->file_name);
424		return 1;
425	}
426
427	if (td->o.start_offset && f->file_offset == 0)
428		dprint(FD_FILE, "offset of file %s not initialized yet\n",
429					f->file_name);
430	/*
431	 * ->file_offset normally hasn't been initialized yet, so this
432	 * is basically always false.
433	 */
434	if (f->file_offset > f->real_file_size) {
435		log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
436					(unsigned long long) f->file_offset,
437					(unsigned long long) f->real_file_size);
438		return 1;
439	}
440
441	fio_file_set_size_known(f);
442	return 0;
443}
444
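/*
 * Drop cached data for the given range of the file. Engines may supply
 * their own ->invalidate hook; otherwise we use posix_fadvise(DONTNEED)
 * for regular files and a block device ioctl for block devices. Failures
 * are logged but never treated as fatal.
 */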
445static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
446				   unsigned long long off,
447				   unsigned long long len)
448{
449	int errval = 0, ret = 0;
450
451#ifdef CONFIG_ESX
452	return 0;
453#endif
454
455	if (len == -1ULL)
456		len = f->io_size;
457	if (off == -1ULL)
458		off = f->file_offset;
459
460	if (len == -1ULL || off == -1ULL)
461		return 0;
462
463	if (td->io_ops->invalidate) {
464		dprint(FD_IO, "invalidate %s cache %s\n", td->io_ops->name,
465			f->file_name);
466		ret = td->io_ops->invalidate(td, f);
467		if (ret < 0)
468			errval = -ret;
469	} else if (f->filetype == FIO_TYPE_FILE) {
470		dprint(FD_IO, "declare unneeded cache %s: %llu/%llu\n",
471			f->file_name, off, len);
472		ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
473		if (ret)
474			errval = ret;
475	} else if (f->filetype == FIO_TYPE_BLOCK) {
476		int retry_count = 0;
477
478		dprint(FD_IO, "drop page cache %s\n", f->file_name);
479		ret = blockdev_invalidate_cache(f);
480		while (ret < 0 && errno == EAGAIN && retry_count++ < 25) {
481			/*
482			 * Linux multipath devices reject ioctl while
483			 * the maps are being updated. That window can
484			 * last tens of milliseconds; we'll try up to
485			 * a quarter of a second.
486			 */
487			usleep(10000);
488			ret = blockdev_invalidate_cache(f);
489		}
490		if (ret < 0 && errno == EACCES && geteuid()) {
491			if (!root_warn) {
492				log_err("fio: only root may flush block "
493					"devices. Cache flush bypassed!\n");
494				root_warn = 1;
495			}
496			ret = 0;
497		}
498		if (ret < 0)
499			errval = errno;
500		else if (ret) /* probably not supported */
501			errval = ret;
502	} else if (f->filetype == FIO_TYPE_CHAR ||
503		   f->filetype == FIO_TYPE_PIPE) {
504		dprint(FD_IO, "invalidate not supported %s\n", f->file_name);
505		ret = 0;
506	}
507
508	/*
509	 * Cache flushing isn't a fatal condition, and we know it will
510	 * happen on some platforms where we don't have the proper
	 * function to flush e.g. block device caches. So just warn and
512	 * continue on our way.
513	 */
514	if (errval)
515		log_info("fio: cache invalidation of %s failed: %s\n",
516			 f->file_name, strerror(errval));
517
	return 0;
}
521
522int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
523{
524	if (!fio_file_open(f))
525		return 0;
526
527	return __file_invalidate_cache(td, f, -1ULL, -1ULL);
528}
529
530int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
531{
532	int ret = 0;
533
534	dprint(FD_FILE, "fd close %s\n", f->file_name);
535
536	remove_file_hash(f);
537
538	if (close(f->fd) < 0)
539		ret = errno;
540
541	f->fd = -1;
542
543	if (f->shadow_fd != -1) {
544		close(f->shadow_fd);
545		f->shadow_fd = -1;
546	}
547
548	f->engine_pos = 0;
549	return ret;
550}
551
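/*
 * Open a file, first checking the global file hash so that jobs working
 * on the same file name share lock state. Stores the new fd (or -1) in
 * f->fd and returns whether the file was found in the hash.
 */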
552int file_lookup_open(struct fio_file *f, int flags)
553{
554	struct fio_file *__f;
555	int from_hash;
556
557	__f = lookup_file_hash(f->file_name);
558	if (__f) {
559		dprint(FD_FILE, "found file in hash %s\n", f->file_name);
560		f->lock = __f->lock;
561		from_hash = 1;
562	} else {
563		dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
564		from_hash = 0;
565	}
566
567#ifdef WIN32
568	flags |= _O_BINARY;
569#endif
570
571	f->fd = open(f->file_name, flags, 0600);
572	return from_hash;
573}
574
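/*
 * Close any shadow fds stashed by generic_open_file(). Called when we run
 * out of file descriptors (EMFILE), so that the open can be retried.
 * Returns the number of descriptors closed.
 */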
575static int file_close_shadow_fds(struct thread_data *td)
576{
577	struct fio_file *f;
578	int num_closed = 0;
579	unsigned int i;
580
581	for_each_file(td, f, i) {
582		if (f->shadow_fd == -1)
583			continue;
584
585		close(f->shadow_fd);
586		f->shadow_fd = -1;
587		num_closed++;
588	}
589
590	return num_closed;
591}
592
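/*
 * Default .open_file implementation: build the open(2) flags from the job
 * options (direct, atomic, sync, create), handle stdin/stdout, and retry
 * without FIO_O_NOATIME or after closing shadow fds if the first open
 * fails.
 */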
593int generic_open_file(struct thread_data *td, struct fio_file *f)
594{
595	int is_std = 0;
596	int flags = 0;
597	int from_hash = 0;
598
599	dprint(FD_FILE, "fd open %s\n", f->file_name);
600
601	if (!strcmp(f->file_name, "-")) {
602		if (td_rw(td)) {
603			log_err("fio: can't read/write to stdin/out\n");
604			return 1;
605		}
606		is_std = 1;
607
608		/*
609		 * move output logging to stderr, if we are writing to stdout
610		 */
611		if (td_write(td))
612			f_out = stderr;
613	}
614
615	if (td_trim(td))
616		goto skip_flags;
617	if (td->o.odirect)
618		flags |= OS_O_DIRECT;
619	if (td->o.oatomic) {
620		if (!FIO_O_ATOMIC) {
621			td_verror(td, EINVAL, "OS does not support atomic IO");
622			return 1;
623		}
624		flags |= OS_O_DIRECT | FIO_O_ATOMIC;
625	}
626	if (td->o.sync_io)
627		flags |= O_SYNC;
628	if (td->o.create_on_open && td->o.allow_create)
629		flags |= O_CREAT;
630skip_flags:
631	if (f->filetype != FIO_TYPE_FILE)
632		flags |= FIO_O_NOATIME;
633
634open_again:
635	if (td_write(td)) {
636		if (!read_only)
637			flags |= O_RDWR;
638
639		if (f->filetype == FIO_TYPE_FILE && td->o.allow_create)
640			flags |= O_CREAT;
641
642		if (is_std)
643			f->fd = dup(STDOUT_FILENO);
644		else
645			from_hash = file_lookup_open(f, flags);
646	} else if (td_read(td)) {
647		if (f->filetype == FIO_TYPE_CHAR && !read_only)
648			flags |= O_RDWR;
649		else
650			flags |= O_RDONLY;
651
652		if (is_std)
653			f->fd = dup(STDIN_FILENO);
654		else
655			from_hash = file_lookup_open(f, flags);
656	} else if (td_trim(td)) {
657		assert(!td_rw(td)); /* should have matched above */
658		flags |= O_RDWR;
659		from_hash = file_lookup_open(f, flags);
660	}
661
662	if (f->fd == -1) {
663		char buf[FIO_VERROR_SIZE];
664		int __e = errno;
665
666		if (__e == EPERM && (flags & FIO_O_NOATIME)) {
667			flags &= ~FIO_O_NOATIME;
668			goto open_again;
669		}
670		if (__e == EMFILE && file_close_shadow_fds(td))
671			goto open_again;
672
673		snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
674
675		if (__e == EINVAL && (flags & OS_O_DIRECT)) {
676			log_err("fio: looks like your file system does not " \
677				"support direct=1/buffered=0\n");
678		}
679
680		td_verror(td, __e, buf);
681		return 1;
682	}
683
684	if (!from_hash && f->fd != -1) {
685		if (add_file_hash(f)) {
686			int fio_unused ret;
687
688			/*
689			 * Stash away descriptor for later close. This is to
690			 * work-around a "feature" on Linux, where a close of
691			 * an fd that has been opened for write will trigger
692			 * udev to call blkid to check partitions, fs id, etc.
693			 * That pollutes the device cache, which can slow down
694			 * unbuffered accesses.
695			 */
696			if (f->shadow_fd == -1)
697				f->shadow_fd = f->fd;
698			else {
699				/*
				 * OK to ignore, we haven't done anything
701				 * with it
702				 */
703				ret = generic_close_file(td, f);
704			}
705			goto open_again;
706		}
707	}
708
709	return 0;
710}
711
712/*
 * This function, i.e. get_file_size(), is the default .get_file_size
 * implementation for the majority of I/O engines.
715 */
716int generic_get_file_size(struct thread_data *td, struct fio_file *f)
717{
718	return get_file_size(td, f);
719}
720
721/*
722 * open/close all files, so that ->real_file_size gets set
723 */
724static int get_file_sizes(struct thread_data *td)
725{
726	struct fio_file *f;
727	unsigned int i;
728	int err = 0;
729
730	for_each_file(td, f, i) {
731		dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
732								f->file_name);
733
734		if (td_io_get_file_size(td, f)) {
735			if (td->error != ENOENT) {
736				log_err("%s\n", td->verror);
737				err = 1;
738				break;
739			}
740			clear_error(td);
741		}
742
743		/*
744		 * There are corner cases where we end up with -1 for
745		 * ->real_file_size due to unsupported file type, etc.
		 * We then just set it to the size option value divided by the
		 * number of files, similar to the way file ->io_size is set.
748		 * stat(2) failure doesn't set ->real_file_size to -1.
749		 */
750		if (f->real_file_size == -1ULL && td->o.size)
751			f->real_file_size = td->o.size / td->o.nr_files;
752	}
753
754	return err;
755}
756
757struct fio_mount {
758	struct flist_head list;
759	const char *base;
760	char __base[256];
761	unsigned int key;
762};
763
764/*
 * Get the number of free bytes for each file on each unique mount.
766 */
767static unsigned long long get_fs_free_counts(struct thread_data *td)
768{
769	struct flist_head *n, *tmp;
770	unsigned long long ret = 0;
771	struct fio_mount *fm;
772	FLIST_HEAD(list);
773	struct fio_file *f;
774	unsigned int i;
775
776	for_each_file(td, f, i) {
777		struct stat sb;
778		char buf[256];
779
780		if (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_CHAR) {
781			if (f->real_file_size != -1ULL)
782				ret += f->real_file_size;
783			continue;
784		} else if (f->filetype != FIO_TYPE_FILE)
785			continue;
786
787		buf[255] = '\0';
788		strncpy(buf, f->file_name, 255);
789
790		if (stat(buf, &sb) < 0) {
791			if (errno != ENOENT)
792				break;
793			strcpy(buf, ".");
794			if (stat(buf, &sb) < 0)
795				break;
796		}
797
798		fm = NULL;
799		flist_for_each(n, &list) {
800			fm = flist_entry(n, struct fio_mount, list);
801			if (fm->key == sb.st_dev)
802				break;
803
804			fm = NULL;
805		}
806
807		if (fm)
808			continue;
809
810		fm = calloc(1, sizeof(*fm));
811		strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
812		fm->base = basename(fm->__base);
813		fm->key = sb.st_dev;
814		flist_add(&fm->list, &list);
815	}
816
817	flist_for_each_safe(n, tmp, &list) {
818		unsigned long long sz;
819
820		fm = flist_entry(n, struct fio_mount, list);
821		flist_del(&fm->list);
822
823		sz = get_fs_free_size(fm->base);
824		if (sz && sz != -1ULL)
825			ret += sz;
826
827		free(fm);
828	}
829
830	return ret;
831}
832
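/*
 * Initial offset for this file: the current end of the file when
 * appending, otherwise start_offset plus the per-subjob increment.
 */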
833uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
834{
835	struct thread_options *o = &td->o;
836
837	if (o->file_append && f->filetype == FIO_TYPE_FILE)
838		return f->real_file_size;
839
840	return td->o.start_offset +
841		td->subjob_number * td->o.offset_increment;
842}
843
844/*
 * Open the files and set up file sizes, creating files if necessary.
846 */
847int setup_files(struct thread_data *td)
848{
849	unsigned long long total_size, extend_size;
850	struct thread_options *o = &td->o;
851	struct fio_file *f;
852	unsigned int i, nr_fs_extra = 0;
853	int err = 0, need_extend;
854	int old_state;
855	const unsigned int bs = td_min_bs(td);
856	uint64_t fs = 0;
857
858	dprint(FD_FILE, "setup files\n");
859
860	old_state = td_bump_runstate(td, TD_SETTING_UP);
861
862	if (o->read_iolog_file)
863		goto done;
864
865	/*
866	 * Find out physical size of files or devices for this thread,
867	 * before we determine I/O size and range of our targets.
868	 * If ioengine defines a setup() method, it's responsible for
869	 * opening the files and setting f->real_file_size to indicate
870	 * the valid range for that file.
871	 */
872	if (td->io_ops->setup)
873		err = td->io_ops->setup(td);
874	else
875		err = get_file_sizes(td);
876
877	if (err)
878		goto err_out;
879
880	/*
881	 * check sizes. if the files/devices do not exist and the size
882	 * isn't passed to fio, abort.
883	 */
884	total_size = 0;
885	for_each_file(td, f, i) {
886		f->fileno = i;
887		if (f->real_file_size == -1ULL)
888			total_size = -1ULL;
889		else
890			total_size += f->real_file_size;
891	}
892
893	if (o->fill_device)
894		td->fill_device_size = get_fs_free_counts(td);
895
896	/*
897	 * device/file sizes are zero and no size given, punt
898	 */
899	if ((!total_size || total_size == -1ULL) && !o->size &&
900	    !td_ioengine_flagged(td, FIO_NOIO) && !o->fill_device &&
901	    !(o->nr_files && (o->file_size_low || o->file_size_high))) {
902		log_err("%s: you need to specify size=\n", o->name);
903		td_verror(td, EINVAL, "total_file_size");
904		goto err_out;
905	}
906
907	/*
908	 * Calculate per-file size and potential extra size for the
909	 * first files, if needed (i.e. if we don't have a fixed size).
910	 */
911	if (!o->file_size_low && o->nr_files) {
912		uint64_t all_fs;
913
914		fs = o->size / o->nr_files;
915		all_fs = fs * o->nr_files;
916
917		if (all_fs < o->size)
918			nr_fs_extra = (o->size - all_fs) / bs;
919	}
920
921	/*
922	 * now file sizes are known, so we can set ->io_size. if size= is
923	 * not given, ->io_size is just equal to ->real_file_size. if size
924	 * is given, ->io_size is size / nr_files.
925	 */
926	extend_size = total_size = 0;
927	need_extend = 0;
928	for_each_file(td, f, i) {
929		f->file_offset = get_start_offset(td, f);
930
931		/*
932		 * Update ->io_size depending on options specified.
		 * ->file_size_low being 0 means the filesize option isn't set.
		 * A non-zero ->file_size_low equal to ->file_size_high means
		 * the filesize option is set to a fixed size.
		 * A non-zero ->file_size_low not equal to ->file_size_high
		 * means the filesize option is set to a range.
938		 */
939		if (!o->file_size_low) {
940			/*
941			 * no file size or range given, file size is equal to
942			 * total size divided by number of files. If the size
943			 * doesn't divide nicely with the min blocksize,
944			 * make the first files bigger.
945			 */
946			f->io_size = fs;
947			if (nr_fs_extra) {
948				nr_fs_extra--;
949				f->io_size += bs;
950			}
951
952			/*
953			 * We normally don't come here for regular files, but
954			 * if the result is 0 for a regular file, set it to the
			 * real file size. This could be the size of an already
			 * existing file, and will otherwise be 0. A new file
			 * won't be created because
958			 * ->io_size + ->file_offset equals ->real_file_size.
959			 */
960			if (!f->io_size) {
961				if (f->file_offset > f->real_file_size)
962					goto err_offset;
963				f->io_size = f->real_file_size - f->file_offset;
964				if (!f->io_size)
965					log_info("fio: file %s may be ignored\n",
966						f->file_name);
967			}
968		} else if (f->real_file_size < o->file_size_low ||
969			   f->real_file_size > o->file_size_high) {
970			if (f->file_offset > o->file_size_low)
971				goto err_offset;
972			/*
973			 * file size given. if it's fixed, use that. if it's a
974			 * range, generate a random size in-between.
975			 */
976			if (o->file_size_low == o->file_size_high)
977				f->io_size = o->file_size_low - f->file_offset;
978			else {
979				f->io_size = get_rand_file_size(td)
980						- f->file_offset;
981			}
982		} else
983			f->io_size = f->real_file_size - f->file_offset;
984
985		if (f->io_size == -1ULL)
986			total_size = -1ULL;
987		else {
			if (o->size_percent) {
989				f->io_size = (f->io_size * o->size_percent) / 100;
990				f->io_size -= (f->io_size % td_min_bs(td));
991			}
992			total_size += f->io_size;
993		}
994
995		if (f->filetype == FIO_TYPE_FILE &&
996		    (f->io_size + f->file_offset) > f->real_file_size &&
997		    !td_ioengine_flagged(td, FIO_DISKLESSIO)) {
998			if (!o->create_on_open) {
999				need_extend++;
1000				extend_size += (f->io_size + f->file_offset);
1001				fio_file_set_extend(f);
1002			} else
1003				f->real_file_size = f->io_size + f->file_offset;
1004		}
1005	}
1006
1007	if (td->o.block_error_hist) {
1008		int len;
1009
1010		assert(td->o.nr_files == 1);	/* checked in fixup_options */
1011		f = td->files[0];
1012		len = f->io_size / td->o.bs[DDIR_TRIM];
1013		if (len > MAX_NR_BLOCK_INFOS || len <= 0) {
1014			log_err("fio: cannot calculate block histogram with "
1015				"%d trim blocks, maximum %d\n",
1016				len, MAX_NR_BLOCK_INFOS);
1017			td_verror(td, EINVAL, "block_error_hist");
1018			goto err_out;
1019		}
1020
1021		td->ts.nr_block_infos = len;
1022		for (i = 0; i < len; i++)
1023			td->ts.block_infos[i] =
1024				BLOCK_INFO(0, BLOCK_STATE_UNINIT);
1025	} else
1026		td->ts.nr_block_infos = 0;
1027
1028	if (!o->size || (total_size && o->size > total_size))
1029		o->size = total_size;
1030
1031	if (o->size < td_min_bs(td)) {
1032		log_err("fio: blocksize too large for data set\n");
1033		goto err_out;
1034	}
1035
1036	/*
1037	 * See if we need to extend some files, typically needed when our
1038	 * target regular files don't exist yet, but our jobs require them
1039	 * initially due to read I/Os.
1040	 */
1041	if (need_extend) {
1042		temp_stall_ts = 1;
1043		if (output_format & FIO_OUTPUT_NORMAL) {
1044			log_info("%s: Laying out IO file%s (%u file%s / %s%lluMiB)\n",
1045				 o->name,
1046				 need_extend > 1 ? "s" : "",
1047				 need_extend,
1048				 need_extend > 1 ? "s" : "",
1049				 need_extend > 1 ? "total " : "",
1050				 extend_size >> 20);
1051		}
1052
1053		for_each_file(td, f, i) {
1054			unsigned long long old_len = -1ULL, extend_len = -1ULL;
1055
1056			if (!fio_file_extend(f))
1057				continue;
1058
1059			assert(f->filetype == FIO_TYPE_FILE);
1060			fio_file_clear_extend(f);
1061			if (!o->fill_device) {
1062				old_len = f->real_file_size;
1063				extend_len = f->io_size + f->file_offset -
1064						old_len;
1065			}
1066			f->real_file_size = (f->io_size + f->file_offset);
1067			err = extend_file(td, f);
1068			if (err)
1069				break;
1070
1071			err = __file_invalidate_cache(td, f, old_len,
1072								extend_len);
1073
1074			/*
1075			 * Shut up static checker
1076			 */
1077			if (f->fd != -1)
1078				close(f->fd);
1079
1080			f->fd = -1;
1081			if (err)
1082				break;
1083		}
1084		temp_stall_ts = 0;
1085	}
1086
1087	if (err)
1088		goto err_out;
1089
1090	if (!o->zone_size)
1091		o->zone_size = o->size;
1092
1093	/*
1094	 * iolog already set the total io size, if we read back
1095	 * stored entries.
1096	 */
1097	if (!o->read_iolog_file) {
1098		if (o->io_size)
1099			td->total_io_size = o->io_size * o->loops;
1100		else
1101			td->total_io_size = o->size * o->loops;
1102	}
1103
1104done:
1105	if (o->create_only)
1106		td->done = 1;
1107
1108	td_restore_runstate(td, old_state);
1109	return 0;
1110err_offset:
1111	log_err("%s: you need to specify valid offset=\n", o->name);
1112err_out:
1113	td_restore_runstate(td, old_state);
1114	return 1;
1115}
1116
1117int pre_read_files(struct thread_data *td)
1118{
1119	struct fio_file *f;
1120	unsigned int i;
1121
1122	dprint(FD_FILE, "pre_read files\n");
1123
1124	for_each_file(td, f, i) {
1125		if (pre_read_file(td, f))
1126			return -1;
1127	}
1128
1129	return 0;
1130}
1131
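/*
 * Set up the zipf/pareto/gauss state for one file: split the file into
 * ranges of the smallest block size and seed the chosen distribution,
 * either from the file name (rand_repeatable) or from the job seeds.
 */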
1132static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
1133{
1134	unsigned int range_size, seed;
1135	unsigned long nranges;
1136	uint64_t fsize;
1137
1138	range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1139	fsize = min(f->real_file_size, f->io_size);
1140
1141	nranges = (fsize + range_size - 1) / range_size;
1142
1143	seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1144	if (!td->o.rand_repeatable)
1145		seed = td->rand_seeds[4];
1146
1147	if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1148		zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1149	else if (td->o.random_distribution == FIO_RAND_DIST_PARETO)
1150		pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
1151	else if (td->o.random_distribution == FIO_RAND_DIST_GAUSS)
1152		gauss_init(&f->gauss, nranges, td->o.gauss_dev.u.f, seed);
1153
1154	return 1;
1155}
1156
1157static int init_rand_distribution(struct thread_data *td)
1158{
1159	struct fio_file *f;
1160	unsigned int i;
1161	int state;
1162
1163	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1164		return 0;
1165
1166	state = td_bump_runstate(td, TD_SETTING_UP);
1167
1168	for_each_file(td, f, i)
1169		__init_rand_distribution(td, f);
1170
1171	td_restore_runstate(td, state);
1172
1173	return 1;
1174}
1175
1176/*
1177 * Check if the number of blocks exceeds the randomness capability of
 * the selected generator. Tausworthe is 32-bit, the others are fully
1179 * 64-bit capable.
1180 */
1181static int check_rand_gen_limits(struct thread_data *td, struct fio_file *f,
1182				 uint64_t blocks)
1183{
1184	if (blocks <= FRAND32_MAX)
1185		return 0;
1186	if (td->o.random_generator != FIO_RAND_GEN_TAUSWORTHE)
1187		return 0;
1188
1189	/*
1190	 * If the user hasn't specified a random generator, switch
1191	 * to tausworthe64 with informational warning. If the user did
1192	 * specify one, just warn.
1193	 */
1194	log_info("fio: file %s exceeds 32-bit tausworthe random generator.\n",
1195			f->file_name);
1196
1197	if (!fio_option_is_set(&td->o, random_generator)) {
1198		log_info("fio: Switching to tausworthe64. Use the "
1199			 "random_generator= option to get rid of this "
1200			 "warning.\n");
1201		td->o.random_generator = FIO_RAND_GEN_TAUSWORTHE64;
1202		return 0;
1203	}
1204
1205	/*
	 * Just make this informational, to avoid breaking scripts.
1207	 */
1208	log_info("fio: Use the random_generator= option to switch to lfsr or "
1209			 "tausworthe64.\n");
1210	return 0;
1211}
1212
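/*
 * Allocate the per-file state used to track random block coverage: an
 * LFSR or an axmap, unless norandommap is set or a non-uniform random
 * distribution is in use.
 */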
1213int init_random_map(struct thread_data *td)
1214{
1215	unsigned long long blocks;
1216	struct fio_file *f;
1217	unsigned int i;
1218
1219	if (init_rand_distribution(td))
1220		return 0;
1221	if (!td_random(td))
1222		return 0;
1223
1224	for_each_file(td, f, i) {
1225		uint64_t fsize = min(f->real_file_size, f->io_size);
1226
1227		blocks = fsize / (unsigned long long) td->o.rw_min_bs;
1228
1229		if (check_rand_gen_limits(td, f, blocks))
1230			return 1;
1231
1232		if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1233			unsigned long seed;
1234
1235			seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1236
1237			if (!lfsr_init(&f->lfsr, blocks, seed, 0)) {
1238				fio_file_set_lfsr(f);
1239				continue;
1240			}
1241		} else if (!td->o.norandommap) {
1242			f->io_axmap = axmap_new(blocks);
1243			if (f->io_axmap) {
1244				fio_file_set_axmap(f);
1245				continue;
1246			}
1247		} else if (td->o.norandommap)
1248			continue;
1249
1250		if (!td->o.softrandommap) {
1251			log_err("fio: failed allocating random map. If running"
1252				" a large number of jobs, try the 'norandommap'"
1253				" option or set 'softrandommap'. Or give"
1254				" a larger --alloc-size to fio.\n");
1255			return 1;
1256		}
1257
1258		log_info("fio: file %s failed allocating random map. Running "
1259			 "job without.\n", f->file_name);
1260	}
1261
1262	return 0;
1263}
1264
1265void close_files(struct thread_data *td)
1266{
1267	struct fio_file *f;
1268	unsigned int i;
1269
1270	for_each_file(td, f, i) {
1271		if (fio_file_open(f))
1272			td_io_close_file(td, f);
1273	}
1274}
1275
1276void close_and_free_files(struct thread_data *td)
1277{
1278	struct fio_file *f;
1279	unsigned int i;
1280
1281	dprint(FD_FILE, "close files\n");
1282
1283	for_each_file(td, f, i) {
1284		if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1285			dprint(FD_FILE, "free unlink %s\n", f->file_name);
1286			td_io_unlink_file(td, f);
1287		}
1288
1289		if (fio_file_open(f))
1290			td_io_close_file(td, f);
1291
1292		remove_file_hash(f);
1293
1294		if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1295			dprint(FD_FILE, "free unlink %s\n", f->file_name);
1296			td_io_unlink_file(td, f);
1297		}
1298
1299		sfree(f->file_name);
1300		f->file_name = NULL;
1301		if (fio_file_axmap(f)) {
1302			axmap_free(f->io_axmap);
1303			f->io_axmap = NULL;
1304		}
1305		sfree(f);
1306	}
1307
1308	td->o.filename = NULL;
1309	free(td->files);
1310	free(td->file_locks);
1311	td->files_index = 0;
1312	td->files = NULL;
1313	td->file_locks = NULL;
1314	td->o.file_lock_mode = FILE_LOCK_NONE;
1315	td->o.nr_files = 0;
1316}
1317
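/*
 * Classify the file as a regular file, block device, character device or
 * pipe, based on the name ("-" is treated as a pipe) and stat(2) if the
 * file exists.
 */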
1318static void get_file_type(struct fio_file *f)
1319{
1320	struct stat sb;
1321
1322	if (!strcmp(f->file_name, "-"))
1323		f->filetype = FIO_TYPE_PIPE;
1324	else
1325		f->filetype = FIO_TYPE_FILE;
1326
1327#ifdef WIN32
1328	/* \\.\ is the device namespace in Windows, where every file is
1329	 * a block device */
1330	if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1331		f->filetype = FIO_TYPE_BLOCK;
1332#endif
1333
1334	if (!stat(f->file_name, &sb)) {
1335		if (S_ISBLK(sb.st_mode))
1336			f->filetype = FIO_TYPE_BLOCK;
1337		else if (S_ISCHR(sb.st_mode))
1338			f->filetype = FIO_TYPE_CHAR;
1339		else if (S_ISFIFO(sb.st_mode))
1340			f->filetype = FIO_TYPE_PIPE;
1341	}
1342}
1343
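/*
 * Check (and, if @set is true, record) whether a filename has already been
 * allocated. The bloom filter gives a quick negative; positive hits are
 * confirmed against filename_list. Caller must hold the file hash lock.
 */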
1344static bool __is_already_allocated(const char *fname, bool set)
1345{
1346	struct flist_head *entry;
1347	bool ret;
1348
1349	ret = file_bloom_exists(fname, set);
1350	if (!ret)
1351		return ret;
1352
1353	flist_for_each(entry, &filename_list) {
1354		struct file_name *fn;
1355
1356		fn = flist_entry(entry, struct file_name, list);
1357
1358		if (!strcmp(fn->filename, fname))
1359			return true;
1360	}
1361
1362	return false;
1363}
1364
1365static bool is_already_allocated(const char *fname)
1366{
1367	bool ret;
1368
1369	fio_file_hash_lock();
1370	ret = __is_already_allocated(fname, false);
1371	fio_file_hash_unlock();
1372
1373	return ret;
1374}
1375
1376static void set_already_allocated(const char *fname)
1377{
1378	struct file_name *fn;
1379
1380	fn = malloc(sizeof(struct file_name));
1381	fn->filename = strdup(fname);
1382
1383	fio_file_hash_lock();
1384	if (!__is_already_allocated(fname, true)) {
1385		flist_add_tail(&fn->list, &filename_list);
1386		fn = NULL;
1387	}
1388	fio_file_hash_unlock();
1389
1390	if (fn) {
1391		free(fn->filename);
1392		free(fn);
1393	}
1394}
1395
1396static void free_already_allocated(void)
1397{
1398	struct flist_head *entry, *tmp;
1399	struct file_name *fn;
1400
1401	if (flist_empty(&filename_list))
1402		return;
1403
1404	fio_file_hash_lock();
1405	flist_for_each_safe(entry, tmp, &filename_list) {
1406		fn = flist_entry(entry, struct file_name, list);
1407		free(fn->filename);
1408		flist_del(&fn->list);
1409		free(fn);
1410	}
1411
1412	fio_file_hash_unlock();
1413}
1414
1415static struct fio_file *alloc_new_file(struct thread_data *td)
1416{
1417	struct fio_file *f;
1418
1419	f = smalloc(sizeof(*f));
1420	if (!f) {
1421		assert(0);
1422		return NULL;
1423	}
1424
1425	f->fd = -1;
1426	f->shadow_fd = -1;
1427	fio_file_reset(td, f);
1428	return f;
1429}
1430
1431bool exists_and_not_regfile(const char *filename)
1432{
1433	struct stat sb;
1434
1435	if (lstat(filename, &sb) == -1)
1436		return false;
1437
1438#ifndef WIN32 /* NOT Windows */
1439	if (S_ISREG(sb.st_mode))
1440		return false;
1441#else
1442	/* \\.\ is the device namespace in Windows, where every file
1443	 * is a device node */
1444	if (S_ISREG(sb.st_mode) && strncmp(filename, "\\\\.\\", 4) != 0)
1445		return false;
1446#endif
1447
1448	return true;
1449}
1450
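/*
 * Add a file to the job: build the full path from the directory option,
 * skip names already allocated by a cloned sibling, grow the file array
 * if needed and set up lock state. Returns the index of the added file.
 */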
1451int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1452{
1453	int cur_files = td->files_index;
1454	char file_name[PATH_MAX];
1455	struct fio_file *f;
1456	int len = 0;
1457
1458	dprint(FD_FILE, "add file %s\n", fname);
1459
1460	if (td->o.directory)
1461		len = set_name_idx(file_name, PATH_MAX, td->o.directory, numjob,
1462					td->o.unique_filename);
1463
1464	sprintf(file_name + len, "%s", fname);
1465
	/* cloned siblings (numjob != 0) can reuse regular files already allocated */
1467	if (numjob && is_already_allocated(file_name) &&
1468	    !exists_and_not_regfile(fname))
1469		return 0;
1470
1471	f = alloc_new_file(td);
1472
1473	if (td->files_size <= td->files_index) {
1474		unsigned int new_size = td->o.nr_files + 1;
1475
1476		dprint(FD_FILE, "resize file array to %d files\n", new_size);
1477
1478		td->files = realloc(td->files, new_size * sizeof(f));
1479		if (td->files == NULL) {
1480			log_err("fio: realloc OOM\n");
1481			assert(0);
1482		}
1483		if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1484			td->file_locks = realloc(td->file_locks, new_size);
1485			if (!td->file_locks) {
1486				log_err("fio: realloc OOM\n");
1487				assert(0);
1488			}
1489			td->file_locks[cur_files] = FILE_LOCK_NONE;
1490		}
1491		td->files_size = new_size;
1492	}
1493	td->files[cur_files] = f;
1494	f->fileno = cur_files;
1495
1496	/*
1497	 * init function, io engine may not be loaded yet
1498	 */
1499	if (td->io_ops && td_ioengine_flagged(td, FIO_DISKLESSIO))
1500		f->real_file_size = -1ULL;
1501
1502	f->file_name = smalloc_strdup(file_name);
1503	if (!f->file_name)
1504		assert(0);
1505
1506	get_file_type(f);
1507
1508	switch (td->o.file_lock_mode) {
1509	case FILE_LOCK_NONE:
1510		break;
1511	case FILE_LOCK_READWRITE:
1512		f->rwlock = fio_rwlock_init();
1513		break;
1514	case FILE_LOCK_EXCLUSIVE:
1515		f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1516		break;
1517	default:
1518		log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1519		assert(0);
1520	}
1521
1522	td->files_index++;
1523	if (f->filetype == FIO_TYPE_FILE)
1524		td->nr_normal_files++;
1525
1526	set_already_allocated(file_name);
1527
1528	if (inc)
1529		td->o.nr_files++;
1530
1531	dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1532							cur_files);
1533
1534	return cur_files;
1535}
1536
1537int add_file_exclusive(struct thread_data *td, const char *fname)
1538{
1539	struct fio_file *f;
1540	unsigned int i;
1541
1542	for_each_file(td, f, i) {
1543		if (!strcmp(f->file_name, fname))
1544			return i;
1545	}
1546
1547	return add_file(td, fname, 0, 1);
1548}
1549
1550void get_file(struct fio_file *f)
1551{
1552	dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1553	assert(fio_file_open(f));
1554	f->references++;
1555}
1556
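/*
 * Drop a reference to the file. When the last reference goes away, the
 * file is fsynced (if fsync_on_close is set) and closed via the engine's
 * ->close_file hook.
 */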
1557int put_file(struct thread_data *td, struct fio_file *f)
1558{
1559	int f_ret = 0, ret = 0;
1560
1561	dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1562
1563	if (!fio_file_open(f)) {
1564		assert(f->fd == -1);
1565		return 0;
1566	}
1567
1568	assert(f->references);
1569	if (--f->references)
1570		return 0;
1571
1572	if (should_fsync(td) && td->o.fsync_on_close) {
1573		f_ret = fsync(f->fd);
1574		if (f_ret < 0)
1575			f_ret = errno;
1576	}
1577
1578	if (td->io_ops->close_file)
1579		ret = td->io_ops->close_file(td, f);
1580
1581	if (!ret)
1582		ret = f_ret;
1583
1584	td->nr_open_files--;
1585	fio_file_clear_open(f);
1586	assert(f->fd == -1);
1587	return ret;
1588}
1589
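/*
 * Per-file locking for jobs sharing a file: a reader/writer lock with
 * FILE_LOCK_READWRITE, a single mutex with FILE_LOCK_EXCLUSIVE.
 */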
1590void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1591{
1592	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1593		return;
1594
1595	if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1596		if (ddir == DDIR_READ)
1597			fio_rwlock_read(f->rwlock);
1598		else
1599			fio_rwlock_write(f->rwlock);
1600	} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1601		fio_mutex_down(f->lock);
1602
1603	td->file_locks[f->fileno] = td->o.file_lock_mode;
1604}
1605
1606void unlock_file(struct thread_data *td, struct fio_file *f)
1607{
1608	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1609		return;
1610
1611	if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1612		fio_rwlock_unlock(f->rwlock);
1613	else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1614		fio_mutex_up(f->lock);
1615
1616	td->file_locks[f->fileno] = FILE_LOCK_NONE;
1617}
1618
1619void unlock_file_all(struct thread_data *td, struct fio_file *f)
1620{
1621	if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1622		return;
1623	if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1624		unlock_file(td, f);
1625}
1626
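/*
 * Recursively walk a directory, adding every regular file found to the
 * job.
 */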
1627static int recurse_dir(struct thread_data *td, const char *dirname)
1628{
1629	struct dirent *dir;
1630	int ret = 0;
1631	DIR *D;
1632
1633	D = opendir(dirname);
1634	if (!D) {
1635		char buf[FIO_VERROR_SIZE];
1636
1637		snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1638		td_verror(td, errno, buf);
1639		return 1;
1640	}
1641
1642	while ((dir = readdir(D)) != NULL) {
1643		char full_path[PATH_MAX];
1644		struct stat sb;
1645
1646		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1647			continue;
1648
1649		sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1650
		if (lstat(full_path, &sb) == -1) {
			if (errno != ENOENT) {
				td_verror(td, errno, "lstat");
				ret = 1;
				break;
			}
			continue;
		}
1658
1659		if (S_ISREG(sb.st_mode)) {
1660			add_file(td, full_path, 0, 1);
1661			continue;
1662		}
1663		if (!S_ISDIR(sb.st_mode))
1664			continue;
1665
1666		ret = recurse_dir(td, full_path);
1667		if (ret)
1668			break;
1669	}
1670
1671	closedir(D);
1672	return ret;
1673}
1674
1675int add_dir_files(struct thread_data *td, const char *path)
1676{
1677	int ret = recurse_dir(td, path);
1678
1679	if (!ret)
1680		log_info("fio: opendir added %d files\n", td->o.nr_files);
1681
1682	return ret;
1683}
1684
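/*
 * Clone the file list from an existing job, duplicating the file names
 * but sharing the lock objects.
 */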
1685void dup_files(struct thread_data *td, struct thread_data *org)
1686{
1687	struct fio_file *f;
1688	unsigned int i;
1689
1690	dprint(FD_FILE, "dup files: %d\n", org->files_index);
1691
1692	if (!org->files)
1693		return;
1694
1695	td->files = malloc(org->files_index * sizeof(f));
1696
1697	if (td->o.file_lock_mode != FILE_LOCK_NONE)
1698		td->file_locks = malloc(org->files_index);
1699
1700	for_each_file(org, f, i) {
1701		struct fio_file *__f;
1702
1703		__f = alloc_new_file(td);
1704
1705		if (f->file_name) {
1706			__f->file_name = smalloc_strdup(f->file_name);
1707			if (!__f->file_name)
1708				assert(0);
1709
1710			__f->filetype = f->filetype;
1711		}
1712
1713		if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1714			__f->lock = f->lock;
1715		else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1716			__f->rwlock = f->rwlock;
1717
1718		td->files[i] = __f;
1719	}
1720}
1721
1722/*
1723 * Returns the index that matches the filename, or -1 if not there
1724 */
1725int get_fileno(struct thread_data *td, const char *fname)
1726{
1727	struct fio_file *f;
1728	unsigned int i;
1729
1730	for_each_file(td, f, i)
1731		if (!strcmp(f->file_name, fname))
1732			return i;
1733
1734	return -1;
1735}
1736
1737/*
1738 * For log usage, where we add/open/close files automatically
1739 */
1740void free_release_files(struct thread_data *td)
1741{
1742	close_files(td);
1743	td->o.nr_files = 0;
1744	td->o.open_files = 0;
1745	td->files_index = 0;
1746	td->nr_normal_files = 0;
1747}
1748
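/*
 * Reset per-file I/O state: last positions go back to the file offset and
 * the random block map (axmap or LFSR) is reset.
 */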
1749void fio_file_reset(struct thread_data *td, struct fio_file *f)
1750{
1751	int i;
1752
1753	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
1754		f->last_pos[i] = f->file_offset;
1755		f->last_start[i] = -1ULL;
1756	}
1757
1758	if (fio_file_axmap(f))
1759		axmap_reset(f->io_axmap);
1760	else if (fio_file_lfsr(f))
1761		lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1762}
1763
1764bool fio_files_done(struct thread_data *td)
1765{
1766	struct fio_file *f;
1767	unsigned int i;
1768
1769	for_each_file(td, f, i)
1770		if (!fio_file_done(f))
1771			return false;
1772
1773	return true;
1774}
1775
1776/* free memory used in initialization phase only */
1777void filesetup_mem_free(void)
1778{
1779	free_already_allocated();
1780}
1781