filesetup.c revision d1faa06dc74fcdcae02e70260c90121502ef01cf
1#include <unistd.h>
2#include <fcntl.h>
3#include <string.h>
4#include <assert.h>
5#include <dirent.h>
6#include <libgen.h>
7#include <sys/stat.h>
8#include <sys/mman.h>
9#include <sys/types.h>
10
11#include "fio.h"
12#include "smalloc.h"
13#include "filehash.h"
14#include "options.h"
15#include "os/os.h"
16#include "hash.h"
17#include "lib/axmap.h"
18
19#ifdef CONFIG_LINUX_FALLOCATE
20#include <linux/falloc.h>
21#endif
22
23static int root_warn;
24
25static FLIST_HEAD(filename_list);
26
27static inline void clear_error(struct thread_data *td)
28{
29	td->error = 0;
30	td->verror[0] = '\0';
31}
32
33/*
34 * Leaves f->fd open on success, caller must close
35 */
36static int extend_file(struct thread_data *td, struct fio_file *f)
37{
38	int r, new_layout = 0, unlink_file = 0, flags;
39	unsigned long long left;
40	unsigned int bs;
41	char *b = NULL;
42
43	if (read_only) {
44		log_err("fio: refusing extend of file due to read-only\n");
45		return 0;
46	}
47
48	/*
49	 * check if we need to lay the file out complete again. fio
50	 * does that for operations involving reads, or for writes
51	 * where overwrite is set
52	 */
53	if (td_read(td) ||
54	   (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55	    (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
56		new_layout = 1;
57	if (td_write(td) && !td->o.overwrite && !td->o.file_append)
58		unlink_file = 1;
59
60	if (unlink_file || new_layout) {
61		dprint(FD_FILE, "layout unlink %s\n", f->file_name);
62		if ((unlink(f->file_name) < 0) && (errno != ENOENT)) {
63			td_verror(td, errno, "unlink");
64			return 1;
65		}
66	}
67
68	flags = O_WRONLY | O_CREAT;
69	if (new_layout)
70		flags |= O_TRUNC;
71
72	dprint(FD_FILE, "open file %s, flags %x\n", f->file_name, flags);
73	f->fd = open(f->file_name, flags, 0644);
74	if (f->fd < 0) {
75		td_verror(td, errno, "open");
76		return 1;
77	}
78
79#ifdef CONFIG_POSIX_FALLOCATE
80	if (!td->o.fill_device) {
81		switch (td->o.fallocate_mode) {
82		case FIO_FALLOCATE_NONE:
83			break;
84		case FIO_FALLOCATE_POSIX:
85			dprint(FD_FILE, "posix_fallocate file %s size %llu\n",
86				 f->file_name,
87				 (unsigned long long) f->real_file_size);
88
89			r = posix_fallocate(f->fd, 0, f->real_file_size);
90			if (r > 0) {
91				log_err("fio: posix_fallocate fails: %s\n",
92						strerror(r));
93			}
94			break;
95#ifdef CONFIG_LINUX_FALLOCATE
96		case FIO_FALLOCATE_KEEP_SIZE:
97			dprint(FD_FILE,
98				"fallocate(FALLOC_FL_KEEP_SIZE) "
99				"file %s size %llu\n", f->file_name,
100				(unsigned long long) f->real_file_size);
101
102			r = fallocate(f->fd, FALLOC_FL_KEEP_SIZE, 0,
103					f->real_file_size);
104			if (r != 0)
105				td_verror(td, errno, "fallocate");
106
107			break;
108#endif /* CONFIG_LINUX_FALLOCATE */
109		default:
110			log_err("fio: unknown fallocate mode: %d\n",
111				td->o.fallocate_mode);
112			assert(0);
113		}
114	}
115#endif /* CONFIG_POSIX_FALLOCATE */
116
117	if (!new_layout)
118		goto done;
119
120	/*
121	 * The size will be -1ULL when fill_device is used, so don't truncate
122	 * or fallocate this file, just write it
123	 */
124	if (!td->o.fill_device) {
125		dprint(FD_FILE, "truncate file %s, size %llu\n", f->file_name,
126					(unsigned long long) f->real_file_size);
127		if (ftruncate(f->fd, f->real_file_size) == -1) {
128			if (errno != EFBIG) {
129				td_verror(td, errno, "ftruncate");
130				goto err;
131			}
132		}
133	}
134
135	b = malloc(td->o.max_bs[DDIR_WRITE]);
136
137	left = f->real_file_size;
138	while (left && !td->terminate) {
139		bs = td->o.max_bs[DDIR_WRITE];
140		if (bs > left)
141			bs = left;
142
143		fill_io_buffer(td, b, bs, bs);
144
145		r = write(f->fd, b, bs);
146
147		if (r > 0) {
148			left -= r;
149			continue;
150		} else {
151			if (r < 0) {
152				int __e = errno;
153
154				if (__e == ENOSPC) {
155					if (td->o.fill_device)
156						break;
157					log_info("fio: ENOSPC on laying out "
158						 "file, stopping\n");
159					break;
160				}
161				td_verror(td, __e, "write");
162			} else
163				td_verror(td, EIO, "write");
164
165			break;
166		}
167	}
168
169	if (td->terminate) {
170		dprint(FD_FILE, "terminate unlink %s\n", f->file_name);
171		unlink(f->file_name);
172	} else if (td->o.create_fsync) {
173		if (fsync(f->fd) < 0) {
174			td_verror(td, errno, "fsync");
175			goto err;
176		}
177	}
178	if (td->o.fill_device && !td_write(td)) {
179		fio_file_clear_size_known(f);
180		if (td_io_get_file_size(td, f))
181			goto err;
182		if (f->io_size > f->real_file_size)
183			f->io_size = f->real_file_size;
184	}
185
186	free(b);
187done:
188	return 0;
189err:
190	close(f->fd);
191	f->fd = -1;
192	if (b)
193		free(b);
194	return 1;
195}
196
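/*
 * Read the whole working area of a file once before the job starts, so
 * the data is already cached when the real IO begins. Opens the file if
 * it is not open yet, bumps the runstate to TD_PRE_READING for the
 * duration, and skips pipe-style IO engines entirely.
 */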
197static int pre_read_file(struct thread_data *td, struct fio_file *f)
198{
199	int ret = 0, r, did_open = 0, old_runstate;
200	unsigned long long left;
201	unsigned int bs;
202	char *b;
203
204	if (td->io_ops->flags & FIO_PIPEIO)
205		return 0;
206
207	if (!fio_file_open(f)) {
208		if (td->io_ops->open_file(td, f)) {
209			log_err("fio: cannot pre-read, failed to open file\n");
210			return 1;
211		}
212		did_open = 1;
213	}
214
215	old_runstate = td_bump_runstate(td, TD_PRE_READING);
216
217	bs = td->o.max_bs[DDIR_READ];
218	b = malloc(bs);
219	memset(b, 0, bs);
220
221	if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
222		td_verror(td, errno, "lseek");
223		log_err("fio: failed to lseek pre-read file\n");
224		ret = 1;
225		goto error;
226	}
227
228	left = f->io_size;
229
230	while (left && !td->terminate) {
231		if (bs > left)
232			bs = left;
233
234		r = read(f->fd, b, bs);
235
236		if (r == (int) bs) {
237			left -= bs;
238			continue;
239		} else {
240			td_verror(td, EIO, "pre_read");
241			break;
242		}
243	}
244
245error:
246	td_restore_runstate(td, old_runstate);
247
248	if (did_open)
249		td->io_ops->close_file(td, f);
250
251	free(b);
252	return ret;
253}
254
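/*
 * Pick a size between file_size_low and file_size_high using the chosen
 * random generator, then round it down to a multiple of the minimum
 * block size.
 */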
255static unsigned long long get_rand_file_size(struct thread_data *td)
256{
257	unsigned long long ret, sized;
258	unsigned long r;
259
260	if (td->o.use_os_rand) {
261		r = os_random_long(&td->file_size_state);
262		sized = td->o.file_size_high - td->o.file_size_low;
263		ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
264	} else {
265		r = __rand(&td->__file_size_state);
266		sized = td->o.file_size_high - td->o.file_size_low;
267		ret = (unsigned long long) ((double) sized * (r / (FRAND_MAX + 1.0)));
268	}
269
270	ret += td->o.file_size_low;
271	ret -= (ret % td->o.rw_min_bs);
272	return ret;
273}
274
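/*
 * Regular files: the real size is simply what stat() reports.
 */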
275static int file_size(struct thread_data *td, struct fio_file *f)
276{
277	struct stat st;
278
279	if (stat(f->file_name, &st) == -1) {
280		td_verror(td, errno, "stat");
281		return 1;
282	}
283
284	f->real_file_size = st.st_size;
285	return 0;
286}
287
288static int bdev_size(struct thread_data *td, struct fio_file *f)
289{
290	unsigned long long bytes = 0;
291	int r;
292
293	if (td->io_ops->open_file(td, f)) {
294		log_err("fio: failed opening blockdev %s for size check\n",
295			f->file_name);
296		return 1;
297	}
298
299	r = blockdev_size(f, &bytes);
300	if (r) {
301		td_verror(td, r, "blockdev_size");
302		goto err;
303	}
304
305	if (!bytes) {
306		log_err("%s: zero sized block device?\n", f->file_name);
307		goto err;
308	}
309
310	f->real_file_size = bytes;
311	td->io_ops->close_file(td, f);
312	return 0;
313err:
314	td->io_ops->close_file(td, f);
315	return 1;
316}
317
318static int char_size(struct thread_data *td, struct fio_file *f)
319{
320#ifdef FIO_HAVE_CHARDEV_SIZE
321	unsigned long long bytes = 0;
322	int r;
323
324	if (td->io_ops->open_file(td, f)) {
325		log_err("fio: failed opening chardev %s for size check\n",
326			f->file_name);
327		return 1;
328	}
329
330	r = chardev_size(f, &bytes);
331	if (r) {
332		td_verror(td, r, "chardev_size");
333		goto err;
334	}
335
336	if (!bytes) {
337		log_err("%s: zero sized char device?\n", f->file_name);
338		goto err;
339	}
340
341	f->real_file_size = bytes;
342	td->io_ops->close_file(td, f);
343	return 0;
344err:
345	td->io_ops->close_file(td, f);
346	return 1;
347#else
348	f->real_file_size = -1ULL;
349	return 0;
350#endif
351}
352
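/*
 * Fill in ->real_file_size based on the file type (regular file, block
 * device or character device) and fail if the configured offset starts
 * beyond the end of the file. The result is cached via the size-known
 * flag so repeated calls are cheap.
 */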
353static int get_file_size(struct thread_data *td, struct fio_file *f)
354{
355	int ret = 0;
356
357	if (fio_file_size_known(f))
358		return 0;
359
360	if (f->filetype == FIO_TYPE_FILE)
361		ret = file_size(td, f);
362	else if (f->filetype == FIO_TYPE_BD)
363		ret = bdev_size(td, f);
364	else if (f->filetype == FIO_TYPE_CHAR)
365		ret = char_size(td, f);
366	else
367		f->real_file_size = -1ULL;
368
369	if (ret)
370		return ret;
371
372	if (f->file_offset > f->real_file_size) {
373		log_err("%s: offset extends past end of file (%llu > %llu)\n", td->o.name,
374					(unsigned long long) f->file_offset,
375					(unsigned long long) f->real_file_size);
376		return 1;
377	}
378
379	fio_file_set_size_known(f);
380	return 0;
381}
382
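/*
 * Drop cached data for the given range: posix_madvise() for mmap'ed
 * files, posix_fadvise() for regular files, and the OS block device
 * flush helper otherwise. Failures are only logged, since not every
 * platform or privilege level can flush device caches.
 */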
383static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
384				   unsigned long long off,
385				   unsigned long long len)
386{
387	int ret = 0;
388
389	if (len == -1ULL)
390		len = f->io_size;
391	if (off == -1ULL)
392		off = f->file_offset;
393
394	if (len == -1ULL || off == -1ULL)
395		return 0;
396
397	dprint(FD_IO, "invalidate cache %s: %llu/%llu\n", f->file_name, off,
398								len);
399
400	if (f->mmap_ptr) {
401		ret = posix_madvise(f->mmap_ptr, f->mmap_sz, POSIX_MADV_DONTNEED);
402#ifdef FIO_MADV_FREE
403		if (f->filetype == FIO_TYPE_BD)
404			(void) posix_madvise(f->mmap_ptr, f->mmap_sz, FIO_MADV_FREE);
405#endif
406	} else if (f->filetype == FIO_TYPE_FILE) {
407		ret = posix_fadvise(f->fd, off, len, POSIX_FADV_DONTNEED);
408	} else if (f->filetype == FIO_TYPE_BD) {
409		ret = blockdev_invalidate_cache(f);
410		if (ret < 0 && errno == EACCES && geteuid()) {
411			if (!root_warn) {
412				log_err("fio: only root may flush block "
413					"devices. Cache flush bypassed!\n");
414				root_warn = 1;
415			}
416			ret = 0;
417		}
418	} else if (f->filetype == FIO_TYPE_CHAR || f->filetype == FIO_TYPE_PIPE)
419		ret = 0;
420
421	/*
422	 * Cache flushing isn't a fatal condition, and we know it will
423	 * happen on some platforms where we don't have the proper
424	 * function to flush e.g. block device caches. So just warn and
425	 * continue on our way.
426	 */
427	if (ret) {
428		log_info("fio: cache invalidation of %s failed: %s\n", f->file_name, strerror(errno));
429		ret = 0;
430	}
431
432	return 0;
433
434}
435
436int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
437{
438	if (!fio_file_open(f))
439		return 0;
440
441	return __file_invalidate_cache(td, f, -1ULL, -1ULL);
442}
443
444int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
445{
446	int ret = 0;
447
448	dprint(FD_FILE, "fd close %s\n", f->file_name);
449
450	remove_file_hash(f);
451
452	if (close(f->fd) < 0)
453		ret = errno;
454
455	f->fd = -1;
456
457	if (f->shadow_fd != -1) {
458		close(f->shadow_fd);
459		f->shadow_fd = -1;
460	}
461
462	f->engine_data = 0;
463	return ret;
464}
465
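/*
 * Open a file by name, reusing the lock of an identically named file
 * already present in the file hash. Returns whether the name was found
 * in the hash; the caller must still check f->fd for open(2) failures.
 */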
466int file_lookup_open(struct fio_file *f, int flags)
467{
468	struct fio_file *__f;
469	int from_hash;
470
471	__f = lookup_file_hash(f->file_name);
472	if (__f) {
473		dprint(FD_FILE, "found file in hash %s\n", f->file_name);
474		/*
475		 * racy, need the __f->lock locked
476		 */
477		f->lock = __f->lock;
478		from_hash = 1;
479	} else {
480		dprint(FD_FILE, "file not found in hash %s\n", f->file_name);
481		from_hash = 0;
482	}
483
484	f->fd = open(f->file_name, flags, 0600);
485	return from_hash;
486}
487
488static int file_close_shadow_fds(struct thread_data *td)
489{
490	struct fio_file *f;
491	int num_closed = 0;
492	unsigned int i;
493
494	for_each_file(td, f, i) {
495		if (f->shadow_fd == -1)
496			continue;
497
498		close(f->shadow_fd);
499		f->shadow_fd = -1;
500		num_closed++;
501	}
502
503	return num_closed;
504}
505
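/*
 * Default ->open_file() helper. The open(2) flags are derived from the
 * job options, e.g. a job with
 *
 *	direct=1 sync=1 create_on_open=1
 *
 * opens with OS_O_DIRECT | O_SYNC | O_CREAT on top of the read/write
 * mode picked from the data direction. "-" as the file name means
 * stdin/stdout, and EPERM/EMFILE errors trigger a retry without
 * O_NOATIME or after closing stashed shadow fds.
 */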
506int generic_open_file(struct thread_data *td, struct fio_file *f)
507{
508	int is_std = 0;
509	int flags = 0;
510	int from_hash = 0;
511
512	dprint(FD_FILE, "fd open %s\n", f->file_name);
513
514	if (td_trim(td) && f->filetype != FIO_TYPE_BD) {
515		log_err("fio: trim only applies to block devices\n");
516		return 1;
517	}
518
519	if (!strcmp(f->file_name, "-")) {
520		if (td_rw(td)) {
521			log_err("fio: can't read/write to stdin/out\n");
522			return 1;
523		}
524		is_std = 1;
525
526		/*
527		 * move output logging to stderr, if we are writing to stdout
528		 */
529		if (td_write(td))
530			f_out = stderr;
531	}
532
533	if (td_trim(td))
534		goto skip_flags;
535	if (td->o.odirect)
536		flags |= OS_O_DIRECT;
537	if (td->o.oatomic) {
538		if (!FIO_O_ATOMIC) {
539			td_verror(td, EINVAL, "OS does not support atomic IO");
540			return 1;
541		}
542		flags |= OS_O_DIRECT | FIO_O_ATOMIC;
543	}
544	if (td->o.sync_io)
545		flags |= O_SYNC;
546	if (td->o.create_on_open)
547		flags |= O_CREAT;
548skip_flags:
549	if (f->filetype != FIO_TYPE_FILE)
550		flags |= FIO_O_NOATIME;
551
552open_again:
553	if (td_write(td)) {
554		if (!read_only)
555			flags |= O_RDWR;
556
557		if (f->filetype == FIO_TYPE_FILE)
558			flags |= O_CREAT;
559
560		if (is_std)
561			f->fd = dup(STDOUT_FILENO);
562		else
563			from_hash = file_lookup_open(f, flags);
564	} else if (td_read(td)) {
565		if (f->filetype == FIO_TYPE_CHAR && !read_only)
566			flags |= O_RDWR;
567		else
568			flags |= O_RDONLY;
569
570		if (is_std)
571			f->fd = dup(STDIN_FILENO);
572		else
573			from_hash = file_lookup_open(f, flags);
574	} else { /* td_trim */
575		flags |= O_RDWR;
576		from_hash = file_lookup_open(f, flags);
577	}
578
579	if (f->fd == -1) {
580		char buf[FIO_VERROR_SIZE];
581		int __e = errno;
582
583		if (__e == EPERM && (flags & FIO_O_NOATIME)) {
584			flags &= ~FIO_O_NOATIME;
585			goto open_again;
586		}
587		if (__e == EMFILE && file_close_shadow_fds(td))
588			goto open_again;
589
590		snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
591
592		if (__e == EINVAL && (flags & OS_O_DIRECT)) {
593			log_err("fio: looks like your file system does not " \
594				"support direct=1/buffered=0\n");
595		}
596
597		td_verror(td, __e, buf);
598	}
599
600	if (!from_hash && f->fd != -1) {
601		if (add_file_hash(f)) {
602			int fio_unused ret;
603
604			/*
605			 * Stash away descriptor for later close. This is to
606			 * work-around a "feature" on Linux, where a close of
607			 * an fd that has been opened for write will trigger
608			 * udev to call blkid to check partitions, fs id, etc.
609			 * That pollutes the device cache, which can slow down
610			 * unbuffered accesses.
611			 */
612			if (f->shadow_fd == -1)
613				f->shadow_fd = f->fd;
614			else {
615				/*
616			 	 * OK to ignore, we haven't done anything
617				 * with it
618				 */
619				ret = generic_close_file(td, f);
620			}
621			goto open_again;
622		}
623	}
624
625	return 0;
626}
627
628int generic_get_file_size(struct thread_data *td, struct fio_file *f)
629{
630	return get_file_size(td, f);
631}
632
633/*
634 * open/close all files, so that ->real_file_size gets set
635 */
636static int get_file_sizes(struct thread_data *td)
637{
638	struct fio_file *f;
639	unsigned int i;
640	int err = 0;
641
642	for_each_file(td, f, i) {
643		dprint(FD_FILE, "get file size for %p/%d/%s\n", f, i,
644								f->file_name);
645
646		if (td_io_get_file_size(td, f)) {
647			if (td->error != ENOENT) {
648				log_err("%s\n", td->verror);
649				err = 1;
650			}
651			clear_error(td);
652		}
653
654		if (f->real_file_size == -1ULL && td->o.size)
655			f->real_file_size = td->o.size / td->o.nr_files;
656	}
657
658	return err;
659}
660
661struct fio_mount {
662	struct flist_head list;
663	const char *base;
664	char __base[256];
665	unsigned int key;
666};
667
668/*
669 * Get free number of bytes for each file on each unique mount.
670 */
671static unsigned long long get_fs_free_counts(struct thread_data *td)
672{
673	struct flist_head *n, *tmp;
674	unsigned long long ret = 0;
675	struct fio_mount *fm;
676	FLIST_HEAD(list);
677	struct fio_file *f;
678	unsigned int i;
679
680	for_each_file(td, f, i) {
681		struct stat sb;
682		char buf[256];
683
684		if (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_CHAR) {
685			if (f->real_file_size != -1ULL)
686				ret += f->real_file_size;
687			continue;
688		} else if (f->filetype != FIO_TYPE_FILE)
689			continue;
690
691		buf[255] = '\0';
692		strncpy(buf, f->file_name, 255);
693
694		if (stat(buf, &sb) < 0) {
695			if (errno != ENOENT)
696				break;
697			strcpy(buf, ".");
698			if (stat(buf, &sb) < 0)
699				break;
700		}
701
702		fm = NULL;
703		flist_for_each(n, &list) {
704			fm = flist_entry(n, struct fio_mount, list);
705			if (fm->key == sb.st_dev)
706				break;
707
708			fm = NULL;
709		}
710
711		if (fm)
712			continue;
713
714		fm = calloc(1, sizeof(*fm));
715		strncpy(fm->__base, buf, sizeof(fm->__base) - 1);
716		fm->base = basename(fm->__base);
717		fm->key = sb.st_dev;
718		flist_add(&fm->list, &list);
719	}
720
721	flist_for_each_safe(n, tmp, &list) {
722		unsigned long long sz;
723
724		fm = flist_entry(n, struct fio_mount, list);
725		flist_del(&fm->list);
726
727		sz = get_fs_size(fm->base);
728		if (sz && sz != -1ULL)
729			ret += sz;
730
731		free(fm);
732	}
733
734	return ret;
735}
736
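/*
 * First offset this job may use in the file: start_offset plus one
 * offset_increment per preceding thread, e.g. with offset_increment=1g
 * the third thread starts at start_offset + 2g. With file_append on a
 * regular file, IO starts at the current end of the file instead.
 */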
737uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
738{
739	struct thread_options *o = &td->o;
740
741	if (o->file_append && f->filetype == FIO_TYPE_FILE)
742		return f->real_file_size;
743
744	return o->start_offset +
745		(td->thread_number - 1) * o->offset_increment;
746}
747
748/*
749 * Open the files and setup files sizes, creating files if necessary.
750 */
751int setup_files(struct thread_data *td)
752{
753	unsigned long long total_size, extend_size;
754	struct thread_options *o = &td->o;
755	struct fio_file *f;
756	unsigned int i, nr_fs_extra = 0;
757	int err = 0, need_extend;
758	int old_state;
759	const unsigned int bs = td_min_bs(td);
760	uint64_t fs = 0;
761
762	dprint(FD_FILE, "setup files\n");
763
764	old_state = td_bump_runstate(td, TD_SETTING_UP);
765
766	if (o->read_iolog_file)
767		goto done;
768
769	/*
770	 * if ioengine defines a setup() method, it's responsible for
771	 * opening the files and setting f->real_file_size to indicate
772	 * the valid range for that file.
773	 */
774	if (td->io_ops->setup)
775		err = td->io_ops->setup(td);
776	else
777		err = get_file_sizes(td);
778
779	if (err)
780		goto err_out;
781
782	/*
783	 * check sizes. if the files/devices do not exist and the size
784	 * isn't passed to fio, abort.
785	 */
786	total_size = 0;
787	for_each_file(td, f, i) {
788		if (f->real_file_size == -1ULL)
789			total_size = -1ULL;
790		else
791			total_size += f->real_file_size;
792	}
793
794	if (o->fill_device)
795		td->fill_device_size = get_fs_free_counts(td);
796
797	/*
798	 * device/file sizes are zero and no size given, punt
799	 */
800	if ((!total_size || total_size == -1ULL) && !o->size &&
801	    !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
802	    !(o->nr_files && (o->file_size_low || o->file_size_high))) {
803		log_err("%s: you need to specify size=\n", o->name);
804		td_verror(td, EINVAL, "total_file_size");
805		goto err_out;
806	}
807
808	/*
809	 * Calculate per-file size and potential extra size for the
810	 * first files, if needed.
811	 */
812	if (!o->file_size_low && o->nr_files) {
813		uint64_t all_fs;
814
815		fs = o->size / o->nr_files;
816		all_fs = fs * o->nr_files;
817
818		if (all_fs < o->size)
819			nr_fs_extra = (o->size - all_fs) / bs;
820	}
821
822	/*
823	 * now file sizes are known, so we can set ->io_size. if size= is
824	 * not given, ->io_size is just equal to ->real_file_size. if size
825	 * is given, ->io_size is size / nr_files.
826	 */
827	extend_size = total_size = 0;
828	need_extend = 0;
829	for_each_file(td, f, i) {
830		f->file_offset = get_start_offset(td, f);
831
832		if (!o->file_size_low) {
833			/*
834			 * no file size range given, file size is equal to
835			 * total size divided by number of files. If that is
836			 * zero, set it to the real file size. If the size
837			 * doesn't divide nicely with the min blocksize,
838			 * make the first files bigger.
839			 */
840			f->io_size = fs;
841			if (nr_fs_extra) {
842				nr_fs_extra--;
843				f->io_size += bs;
844			}
845
846			if (!f->io_size)
847				f->io_size = f->real_file_size - f->file_offset;
848		} else if (f->real_file_size < o->file_size_low ||
849			   f->real_file_size > o->file_size_high) {
850			if (f->file_offset > o->file_size_low)
851				goto err_offset;
852			/*
853			 * file size given. if it's fixed, use that. if it's a
854			 * range, generate a random size in-between.
855			 */
856			if (o->file_size_low == o->file_size_high)
857				f->io_size = o->file_size_low - f->file_offset;
858			else {
859				f->io_size = get_rand_file_size(td)
860						- f->file_offset;
861			}
862		} else
863			f->io_size = f->real_file_size - f->file_offset;
864
865		if (f->io_size == -1ULL)
866			total_size = -1ULL;
867		else {
868			if (o->size_percent)
869				f->io_size = (f->io_size * o->size_percent) / 100;
870			total_size += f->io_size;
871		}
872
873		if (f->filetype == FIO_TYPE_FILE &&
874		    (f->io_size + f->file_offset) > f->real_file_size &&
875		    !(td->io_ops->flags & FIO_DISKLESSIO)) {
876			if (!o->create_on_open) {
877				need_extend++;
878				extend_size += (f->io_size + f->file_offset);
879			} else
880				f->real_file_size = f->io_size + f->file_offset;
881			fio_file_set_extend(f);
882		}
883	}
884
885	if (!o->size || o->size > total_size)
886		o->size = total_size;
887
888	if (o->size < td_min_bs(td)) {
889		log_err("fio: blocksize too large for data set\n");
890		goto err_out;
891	}
892
893	/*
894	 * See if we need to extend some files
895	 */
896	if (need_extend) {
897		temp_stall_ts = 1;
898		if (output_format == FIO_OUTPUT_NORMAL)
899			log_info("%s: Laying out IO file(s) (%u file(s) /"
900				 " %lluMB)\n", o->name, need_extend,
901					extend_size >> 20);
902
903		for_each_file(td, f, i) {
904			unsigned long long old_len = -1ULL, extend_len = -1ULL;
905
906			if (!fio_file_extend(f))
907				continue;
908
909			assert(f->filetype == FIO_TYPE_FILE);
910			fio_file_clear_extend(f);
911			if (!o->fill_device) {
912				old_len = f->real_file_size;
913				extend_len = f->io_size + f->file_offset -
914						old_len;
915			}
916			f->real_file_size = (f->io_size + f->file_offset);
917			err = extend_file(td, f);
918			if (err)
919				break;
920
921			err = __file_invalidate_cache(td, f, old_len,
922								extend_len);
923
924			/*
925			 * Shut up static checker
926			 */
927			if (f->fd != -1)
928				close(f->fd);
929
930			f->fd = -1;
931			if (err)
932				break;
933		}
934		temp_stall_ts = 0;
935	}
936
937	if (err)
938		goto err_out;
939
940	if (!o->zone_size)
941		o->zone_size = o->size;
942
943	/*
944	 * iolog already set the total io size, if we read back
945	 * stored entries.
946	 */
947	if (!o->read_iolog_file)
948		td->total_io_size = o->size * o->loops;
949
950done:
951	if (o->create_only)
952		td->done = 1;
953
954	td_restore_runstate(td, old_state);
955	return 0;
956err_offset:
957	log_err("%s: you need to specify valid offset=\n", o->name);
958err_out:
959	td_restore_runstate(td, old_state);
960	return 1;
961}
962
963int pre_read_files(struct thread_data *td)
964{
965	struct fio_file *f;
966	unsigned int i;
967
968	dprint(FD_FILE, "pre_read files\n");
969
970	for_each_file(td, f, i) {
971		pre_read_file(td, f);
972	}
973
974	return 1;
975}
976
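/*
 * Set up the zipf/pareto state for one file: the file is split into
 * ranges of the smallest configured block size, and the generator is
 * seeded from a hash of the file name (or from the random seed pool if
 * rand_repeatable is disabled).
 */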
977static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
978{
979	unsigned int range_size, seed;
980	unsigned long nranges;
981	uint64_t file_size;
982
983	range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
984	file_size = min(f->real_file_size, f->io_size);
985
986	nranges = (file_size + range_size - 1) / range_size;
987
988	seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
989	if (!td->o.rand_repeatable)
990		seed = td->rand_seeds[4];
991
992	if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
993		zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
994	else
995		pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
996
997	return 1;
998}
999
1000static int init_rand_distribution(struct thread_data *td)
1001{
1002	struct fio_file *f;
1003	unsigned int i;
1004	int state;
1005
1006	if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1007		return 0;
1008
1009	state = td_bump_runstate(td, TD_SETTING_UP);
1010
1011	for_each_file(td, f, i)
1012		__init_rand_distribution(td, f);
1013
1014	td_restore_runstate(td, state);
1015
1016	return 1;
1017}
1018
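/*
 * Allocate the per-file structures used to track random IO coverage:
 * an LFSR when random_generator=lfsr, otherwise an axmap with one bit
 * per rw_min_bs block, unless norandommap is set. If allocation fails,
 * the job only continues when softrandommap allows running without a map.
 */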
1019int init_random_map(struct thread_data *td)
1020{
1021	unsigned long long blocks;
1022	struct fio_file *f;
1023	unsigned int i;
1024
1025	if (init_rand_distribution(td))
1026		return 0;
1027	if (!td_random(td))
1028		return 0;
1029
1030	for_each_file(td, f, i) {
1031		uint64_t file_size = min(f->real_file_size, f->io_size);
1032
1033		blocks = file_size / (unsigned long long) td->o.rw_min_bs;
1034
1035		if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1036			unsigned long seed;
1037
1038			seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1039
1040			if (!lfsr_init(&f->lfsr, blocks, seed, 0))
1041				continue;
1042		} else if (!td->o.norandommap) {
1043			f->io_axmap = axmap_new(blocks);
1044			if (f->io_axmap)
1045				continue;
1046		} else if (td->o.norandommap)
1047			continue;
1048
1049		if (!td->o.softrandommap) {
1050			log_err("fio: failed allocating random map. If running"
1051				" a large number of jobs, try the 'norandommap'"
1052				" option or set 'softrandommap'. Or give"
1053				" a larger --alloc-size to fio.\n");
1054			return 1;
1055		}
1056
1057		log_info("fio: file %s failed allocating random map. Running "
1058			 "job without.\n", f->file_name);
1059	}
1060
1061	return 0;
1062}
1063
1064void close_files(struct thread_data *td)
1065{
1066	struct fio_file *f;
1067	unsigned int i;
1068
1069	for_each_file(td, f, i) {
1070		if (fio_file_open(f))
1071			td_io_close_file(td, f);
1072	}
1073}
1074
1075void close_and_free_files(struct thread_data *td)
1076{
1077	struct fio_file *f;
1078	unsigned int i;
1079
1080	dprint(FD_FILE, "close files\n");
1081
1082	for_each_file(td, f, i) {
1083		if (fio_file_open(f))
1084			td_io_close_file(td, f);
1085
1086		remove_file_hash(f);
1087
1088		if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1089			dprint(FD_FILE, "free unlink %s\n", f->file_name);
1090			unlink(f->file_name);
1091		}
1092
1093		sfree(f->file_name);
1094		f->file_name = NULL;
1095		axmap_free(f->io_axmap);
1096		f->io_axmap = NULL;
1097		sfree(f);
1098	}
1099
1100	td->o.filename = NULL;
1101	free(td->files);
1102	free(td->file_locks);
1103	td->files_index = 0;
1104	td->files = NULL;
1105	td->file_locks = NULL;
1106	td->o.file_lock_mode = FILE_LOCK_NONE;
1107	td->o.nr_files = 0;
1108}
1109
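/*
 * Classify a file name: "-" means a pipe (stdin/stdout), "\\.\" prefixed
 * names are Windows device paths treated as block devices, and anything
 * stat()-able is typed by its mode (block, char, fifo or regular file).
 */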
1110static void get_file_type(struct fio_file *f)
1111{
1112	struct stat sb;
1113
1114	if (!strcmp(f->file_name, "-"))
1115		f->filetype = FIO_TYPE_PIPE;
1116	else
1117		f->filetype = FIO_TYPE_FILE;
1118
1119	/* \\.\ is the device namespace in Windows, where every file is
1120	 * a block device */
1121	if (strncmp(f->file_name, "\\\\.\\", 4) == 0)
1122		f->filetype = FIO_TYPE_BD;
1123
1124	if (!stat(f->file_name, &sb)) {
1125		if (S_ISBLK(sb.st_mode))
1126			f->filetype = FIO_TYPE_BD;
1127		else if (S_ISCHR(sb.st_mode))
1128			f->filetype = FIO_TYPE_CHAR;
1129		else if (S_ISFIFO(sb.st_mode))
1130			f->filetype = FIO_TYPE_PIPE;
1131	}
1132}
1133
1134static int __is_already_allocated(const char *fname)
1135{
1136	struct flist_head *entry;
1137	char *filename;
1138
1139	if (flist_empty(&filename_list))
1140		return 0;
1141
1142	flist_for_each(entry, &filename_list) {
1143		filename = flist_entry(entry, struct file_name, list)->filename;
1144
1145		if (strcmp(filename, fname) == 0)
1146			return 1;
1147	}
1148
1149	return 0;
1150}
1151
1152static int is_already_allocated(const char *fname)
1153{
1154	int ret;
1155
1156	fio_file_hash_lock();
1157	ret = __is_already_allocated(fname);
1158	fio_file_hash_unlock();
1159	return ret;
1160}
1161
1162static void set_already_allocated(const char *fname)
1163{
1164	struct file_name *fn;
1165
1166	fn = malloc(sizeof(struct file_name));
1167	fn->filename = strdup(fname);
1168
1169	fio_file_hash_lock();
1170	if (!__is_already_allocated(fname)) {
1171		flist_add_tail(&fn->list, &filename_list);
1172		fn = NULL;
1173	}
1174	fio_file_hash_unlock();
1175
1176	if (fn) {
1177		free(fn->filename);
1178		free(fn);
1179	}
1180}
1181
1182
1183static void free_already_allocated(void)
1184{
1185	struct flist_head *entry, *tmp;
1186	struct file_name *fn;
1187
1188	if (flist_empty(&filename_list))
1189		return;
1190
1191	fio_file_hash_lock();
1192	flist_for_each_safe(entry, tmp, &filename_list) {
1193		fn = flist_entry(entry, struct file_name, list);
1194		free(fn->filename);
1195		flist_del(&fn->list);
1196		free(fn);
1197	}
1198
1199	fio_file_hash_unlock();
1200}
1201
1202static struct fio_file *alloc_new_file(struct thread_data *td)
1203{
1204	struct fio_file *f;
1205
1206	f = smalloc(sizeof(*f));
1207	if (!f) {
1208		log_err("fio: smalloc OOM\n");
1209		assert(0);
1210		return NULL;
1211	}
1212
1213	f->fd = -1;
1214	f->shadow_fd = -1;
1215	fio_file_reset(td, f);
1216	return f;
1217}
1218
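/*
 * Append one file to the job's file array, growing the array and the
 * per-file lock table as needed, classifying the file type and setting
 * up locking per file_lock_mode. Names already added by a cloned sibling
 * job (numjob != 0) are skipped. Returns the file index.
 */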
1219int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1220{
1221	int cur_files = td->files_index;
1222	char file_name[PATH_MAX];
1223	struct fio_file *f;
1224	int len = 0;
1225
1226	dprint(FD_FILE, "add file %s\n", fname);
1227
1228	if (td->o.directory)
1229		len = set_name_idx(file_name, td->o.directory, numjob);
1230
1231	sprintf(file_name + len, "%s", fname);
1232
1233	/* clean cloned siblings using existing files */
1234	if (numjob && is_already_allocated(file_name))
1235		return 0;
1236
1237	f = alloc_new_file(td);
1238
1239	if (td->files_size <= td->files_index) {
1240		unsigned int new_size = td->o.nr_files + 1;
1241
1242		dprint(FD_FILE, "resize file array to %d files\n", new_size);
1243
1244		td->files = realloc(td->files, new_size * sizeof(f));
1245		if (td->files == NULL) {
1246			log_err("fio: realloc OOM\n");
1247			assert(0);
1248		}
1249		if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1250			td->file_locks = realloc(td->file_locks, new_size);
1251			if (!td->file_locks) {
1252				log_err("fio: realloc OOM\n");
1253				assert(0);
1254			}
1255			td->file_locks[cur_files] = FILE_LOCK_NONE;
1256		}
1257		td->files_size = new_size;
1258	}
1259	td->files[cur_files] = f;
1260	f->fileno = cur_files;
1261
1262	/*
1263	 * init function, io engine may not be loaded yet
1264	 */
1265	if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
1266		f->real_file_size = -1ULL;
1267
1268	f->file_name = smalloc_strdup(file_name);
1269	if (!f->file_name) {
1270		log_err("fio: smalloc OOM\n");
1271		assert(0);
1272	}
1273
1274	get_file_type(f);
1275
1276	switch (td->o.file_lock_mode) {
1277	case FILE_LOCK_NONE:
1278		break;
1279	case FILE_LOCK_READWRITE:
1280		f->rwlock = fio_rwlock_init();
1281		break;
1282	case FILE_LOCK_EXCLUSIVE:
1283		f->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
1284		break;
1285	default:
1286		log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1287		assert(0);
1288	}
1289
1290	td->files_index++;
1291	if (f->filetype == FIO_TYPE_FILE)
1292		td->nr_normal_files++;
1293
1294	set_already_allocated(file_name);
1295
1296	if (inc)
1297		td->o.nr_files++;
1298
1299	dprint(FD_FILE, "file %p \"%s\" added at %d\n", f, f->file_name,
1300							cur_files);
1301
1302	return cur_files;
1303}
1304
1305int add_file_exclusive(struct thread_data *td, const char *fname)
1306{
1307	struct fio_file *f;
1308	unsigned int i;
1309
1310	for_each_file(td, f, i) {
1311		if (!strcmp(f->file_name, fname))
1312			return i;
1313	}
1314
1315	return add_file(td, fname, 0, 1);
1316}
1317
1318void get_file(struct fio_file *f)
1319{
1320	dprint(FD_FILE, "get file %s, ref=%d\n", f->file_name, f->references);
1321	assert(fio_file_open(f));
1322	f->references++;
1323}
1324
1325int put_file(struct thread_data *td, struct fio_file *f)
1326{
1327	int f_ret = 0, ret = 0;
1328
1329	dprint(FD_FILE, "put file %s, ref=%d\n", f->file_name, f->references);
1330
1331	if (!fio_file_open(f)) {
1332		assert(f->fd == -1);
1333		return 0;
1334	}
1335
1336	assert(f->references);
1337	if (--f->references)
1338		return 0;
1339
1340	if (should_fsync(td) && td->o.fsync_on_close) {
1341		f_ret = fsync(f->fd);
1342		if (f_ret < 0)
1343			f_ret = errno;
1344	}
1345
1346	if (td->io_ops->close_file)
1347		ret = td->io_ops->close_file(td, f);
1348
1349	if (!ret)
1350		ret = f_ret;
1351
1352	td->nr_open_files--;
1353	fio_file_clear_open(f);
1354	assert(f->fd == -1);
1355	return ret;
1356}
1357
1358void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1359{
1360	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1361		return;
1362
1363	if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1364		if (ddir == DDIR_READ)
1365			fio_rwlock_read(f->rwlock);
1366		else
1367			fio_rwlock_write(f->rwlock);
1368	} else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1369		fio_mutex_down(f->lock);
1370
1371	td->file_locks[f->fileno] = td->o.file_lock_mode;
1372}
1373
1374void unlock_file(struct thread_data *td, struct fio_file *f)
1375{
1376	if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1377		return;
1378
1379	if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1380		fio_rwlock_unlock(f->rwlock);
1381	else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1382		fio_mutex_up(f->lock);
1383
1384	td->file_locks[f->fileno] = FILE_LOCK_NONE;
1385}
1386
1387void unlock_file_all(struct thread_data *td, struct fio_file *f)
1388{
1389	if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1390		return;
1391	if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1392		unlock_file(td, f);
1393}
1394
1395static int recurse_dir(struct thread_data *td, const char *dirname)
1396{
1397	struct dirent *dir;
1398	int ret = 0;
1399	DIR *D;
1400
1401	D = opendir(dirname);
1402	if (!D) {
1403		char buf[FIO_VERROR_SIZE];
1404
1405		snprintf(buf, FIO_VERROR_SIZE, "opendir(%s)", dirname);
1406		td_verror(td, errno, buf);
1407		return 1;
1408	}
1409
1410	while ((dir = readdir(D)) != NULL) {
1411		char full_path[PATH_MAX];
1412		struct stat sb;
1413
1414		if (!strcmp(dir->d_name, ".") || !strcmp(dir->d_name, ".."))
1415			continue;
1416
1417		sprintf(full_path, "%s%s%s", dirname, FIO_OS_PATH_SEPARATOR, dir->d_name);
1418
1419		if (lstat(full_path, &sb) == -1) {
1420			if (errno != ENOENT) {
1421				td_verror(td, errno, "lstat");
1422				ret = 1;
1423				break;
1424			}
			/* entry vanished between readdir and lstat, skip it */
			continue;
1425		}
1426
1427		if (S_ISREG(sb.st_mode)) {
1428			add_file(td, full_path, 0, 1);
1429			continue;
1430		}
1431		if (!S_ISDIR(sb.st_mode))
1432			continue;
1433
1434		ret = recurse_dir(td, full_path);
1435		if (ret)
1436			break;
1437	}
1438
1439	closedir(D);
1440	return ret;
1441}
1442
1443int add_dir_files(struct thread_data *td, const char *path)
1444{
1445	int ret = recurse_dir(td, path);
1446
1447	if (!ret)
1448		log_info("fio: opendir added %d files\n", td->o.nr_files);
1449
1450	return ret;
1451}
1452
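/*
 * Copy the file list from an existing thread into this one, duplicating
 * the name strings but sharing the lock objects. Typically used when a
 * job definition is cloned into multiple threads.
 */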
1453void dup_files(struct thread_data *td, struct thread_data *org)
1454{
1455	struct fio_file *f;
1456	unsigned int i;
1457
1458	dprint(FD_FILE, "dup files: %d\n", org->files_index);
1459
1460	if (!org->files)
1461		return;
1462
1463	td->files = malloc(org->files_index * sizeof(f));
1464
1465	if (td->o.file_lock_mode != FILE_LOCK_NONE)
1466		td->file_locks = malloc(org->files_index);
1467
1468	for_each_file(org, f, i) {
1469		struct fio_file *__f;
1470
1471		__f = alloc_new_file(td);
1472
1473		if (f->file_name) {
1474			__f->file_name = smalloc_strdup(f->file_name);
1475			if (!__f->file_name) {
1476				log_err("fio: smalloc OOM\n");
1477				assert(0);
1478			}
1479
1480			__f->filetype = f->filetype;
1481		}
1482
1483		if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1484			__f->lock = f->lock;
1485		else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1486			__f->rwlock = f->rwlock;
1487
1488		td->files[i] = __f;
1489	}
1490}
1491
1492/*
1493 * Returns the index that matches the filename, or -1 if not there
1494 */
1495int get_fileno(struct thread_data *td, const char *fname)
1496{
1497	struct fio_file *f;
1498	unsigned int i;
1499
1500	for_each_file(td, f, i)
1501		if (!strcmp(f->file_name, fname))
1502			return i;
1503
1504	return -1;
1505}
1506
1507/*
1508 * For log usage, where we add/open/close files automatically
1509 */
1510void free_release_files(struct thread_data *td)
1511{
1512	close_files(td);
1513	td->files_index = 0;
1514	td->nr_normal_files = 0;
1515}
1516
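/*
 * Reset per-file IO state for a new pass: rewind the position
 * bookkeeping to the file offset and clear the random map / LFSR state.
 */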
1517void fio_file_reset(struct thread_data *td, struct fio_file *f)
1518{
1519	f->last_pos = f->file_offset;
1520	f->last_start = -1ULL;
1521	if (f->io_axmap)
1522		axmap_reset(f->io_axmap);
1523	if (td->o.random_generator == FIO_RAND_GEN_LFSR)
1524		lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1525}
1526
1527int fio_files_done(struct thread_data *td)
1528{
1529	struct fio_file *f;
1530	unsigned int i;
1531
1532	for_each_file(td, f, i)
1533		if (!fio_file_done(f))
1534			return 0;
1535
1536	return 1;
1537}
1538
1539/* free memory used in initialization phase only */
1540void filesetup_mem_free(void)
1541{
1542	free_already_allocated();
1543}
1544