Lines in fio's filesetup.c matching references to td

27 static inline void clear_error(struct thread_data *td)
29 td->error = 0;
30 td->verror[0] = '\0';
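
From the fragments at 27-30 the helper reads back in full; it resets both the numeric error and the verbose message so a tolerated failure (ENOENT during sizing, say) does not poison later reporting:

    static inline void clear_error(struct thread_data *td)
    {
            /* wipe the error number and the verbose error string */
            td->error = 0;
            td->verror[0] = '\0';
    }
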
36 static int extend_file(struct thread_data *td, struct fio_file *f)
53 if (td_read(td) ||
54 (td_write(td) && td->o.overwrite && !td->o.file_append) ||
55 (td_write(td) && td->io_ops->flags & FIO_NOEXTEND))
57 if (td_write(td) && !td->o.overwrite && !td->o.file_append)
63 td_verror(td, errno, "unlink");
79 td_verror(td, errno, "open");
84 if (!td->o.fill_device) {
85 switch (td->o.fallocate_mode) {
109 td_verror(td, errno, "fallocate");
115 td->o.fallocate_mode);
128 if (!td->o.fill_device) {
133 td_verror(td, errno, "ftruncate");
139 b = malloc(td->o.max_bs[DDIR_WRITE]);
142 while (left && !td->terminate) {
143 bs = td->o.max_bs[DDIR_WRITE];
147 fill_io_buffer(td, b, bs, bs);
159 if (td->o.fill_device)
165 td_verror(td, errno, "write");
167 td_verror(td, EIO, "write");
173 if (td->terminate) {
176 } else if (td->o.create_fsync) {
178 td_verror(td, errno, "fsync");
182 if (td->o.fill_device && !td_write(td)) {
184 if (td_io_get_file_size(td, f))
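
The matches at 139-167 outline extend_file's laydown loop; the lines in between (buffer sizing, the write itself, ENOSPC handling) carry no td reference and are filtered out. A sketch of that loop, assuming plain write(2) on f->fd and treating ENOSPC as success when fill_device is set:

    char *b;
    unsigned long long left;
    int err = 0;

    b = malloc(td->o.max_bs[DDIR_WRITE]);
    left = f->real_file_size;

    while (left && !td->terminate) {
            unsigned int bs = td->o.max_bs[DDIR_WRITE];
            ssize_t r;

            if (bs > left)
                    bs = left;

            fill_io_buffer(td, b, bs, bs);  /* pattern or random fill */

            r = write(f->fd, b, bs);
            if (r > 0) {
                    left -= r;
                    continue;
            }
            if (r < 0 && errno == ENOSPC && td->o.fill_device) {
                    /* filling the device is the goal, not an error */
                    break;
            }
            if (r < 0)
                    td_verror(td, errno, "write");
            else
                    td_verror(td, EIO, "write");
            err = 1;
            break;
    }
    free(b);
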
201 static int pre_read_file(struct thread_data *td, struct fio_file *f)
208 if (td->io_ops->flags & FIO_PIPEIO)
212 if (td->io_ops->open_file(td, f)) {
219 old_runstate = td_bump_runstate(td, TD_PRE_READING);
221 bs = td->o.max_bs[DDIR_READ];
226 td_verror(td, errno, "lseek");
234 while (left && !td->terminate) {
244 td_verror(td, EIO, "pre_read");
250 td_restore_runstate(td, old_runstate);
253 td->io_ops->close_file(td, f);
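
pre_read_file (201-253) is the same shape in the read direction: engines that cannot seek (FIO_PIPEIO, 208) are skipped, the file is opened, the runstate bumped, and the working set read once. A sketch of the elided read loop:

    unsigned long long left = f->io_size;
    unsigned int bs = td->o.max_bs[DDIR_READ];
    char *b = malloc(bs);
    int ret = 0;

    if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
            td_verror(td, errno, "lseek");
            ret = 1;
    } else {
            while (left && !td->terminate) {
                    ssize_t r;

                    if (bs > left)
                            bs = left;

                    r = read(f->fd, b, bs);
                    if (r == (ssize_t) bs) {
                            left -= bs;
                            continue;
                    }
                    /* short read or error: flag it and stop */
                    td_verror(td, EIO, "pre_read");
                    ret = 1;
                    break;
            }
    }
    free(b);
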
259 static unsigned long long get_rand_file_size(struct thread_data *td)
264 if (td->o.use_os_rand) {
265 r = os_random_long(&td->file_size_state);
266 sized = td->o.file_size_high - td->o.file_size_low;
269 r = __rand(&td->__file_size_state);
270 sized = td->o.file_size_high - td->o.file_size_low;
274 ret += td->o.file_size_low;
275 ret -= (ret % td->o.rw_min_bs);
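
Both branches at 264-270 compute the same span; the filter only hides the scaling of the raw random value. Reconstructed below, with the scaling hedged as the usual map-onto-the-span idiom (OS_RAND_MAX and FRAND_MAX are fio's range constants for the two generators):

    static unsigned long long get_rand_file_size(struct thread_data *td)
    {
            unsigned long long ret, sized;
            unsigned long r;

            if (td->o.use_os_rand) {
                    r = os_random_long(&td->file_size_state);
                    sized = td->o.file_size_high - td->o.file_size_low;
                    ret = (unsigned long long) ((double) sized * (r / (OS_RAND_MAX + 1.0)));
            } else {
                    r = __rand(&td->__file_size_state);
                    sized = td->o.file_size_high - td->o.file_size_low;
                    ret = (unsigned long long) ((double) sized * (r / (FRAND_MAX + 1.0)));
            }

            /* shift into [file_size_low, file_size_high) and round down
             * to a multiple of the smallest block size in use */
            ret += td->o.file_size_low;
            ret -= (ret % td->o.rw_min_bs);
            return ret;
    }
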
279 static int file_size(struct thread_data *td, struct fio_file *f)
284 td_verror(td, errno, "fstat");
292 static int bdev_size(struct thread_data *td, struct fio_file *f)
297 if (td->io_ops->open_file(td, f)) {
305 td_verror(td, r, "blockdev_size");
315 td->io_ops->close_file(td, f);
318 td->io_ops->close_file(td, f);
322 static int char_size(struct thread_data *td, struct fio_file *f)
328 if (td->io_ops->open_file(td, f)) {
336 td_verror(td, r, "chardev_size");
346 td->io_ops->close_file(td, f);
349 td->io_ops->close_file(td, f);
357 static int get_file_size(struct thread_data *td, struct fio_file *f)
365 ret = file_size(td, f);
367 ret = bdev_size(td, f);
369 ret = char_size(td, f);
377 log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
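
file_size, bdev_size and char_size above are dispatched on file type; 365-369 show the calls and 377 the offset sanity check. A sketch of the dispatcher (FIO_TYPE_CHAR is an assumption, by analogy with FIO_TYPE_BD at 522):

    static int get_file_size(struct thread_data *td, struct fio_file *f)
    {
            int ret = 0;

            if (f->filetype == FIO_TYPE_FILE)
                    ret = file_size(td, f);
            else if (f->filetype == FIO_TYPE_BD)
                    ret = bdev_size(td, f);
            else if (f->filetype == FIO_TYPE_CHAR)
                    ret = char_size(td, f);
            else
                    f->real_file_size = -1;

            if (ret)
                    return ret;

            if (f->file_offset > f->real_file_size) {
                    log_err("%s: offset extends end (%llu > %llu)\n", td->o.name,
                            (unsigned long long) f->file_offset,
                            (unsigned long long) f->real_file_size);
                    return 1;
            }

            return 0;
    }
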
387 static int __file_invalidate_cache(struct thread_data *td, struct fio_file *f,
440 int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
445 return __file_invalidate_cache(td, f, -1ULL, -1ULL);
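
The public wrapper at 440-445 is complete as listed; it invalidates the whole file by passing -1ULL for both offset and length:

    int file_invalidate_cache(struct thread_data *td, struct fio_file *f)
    {
            /* -1ULL offset and length mean "the entire file" */
            return __file_invalidate_cache(td, f, -1ULL, -1ULL);
    }
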
448 int generic_close_file(struct thread_data fio_unused *td, struct fio_file *f)
496 static int file_close_shadow_fds(struct thread_data *td)
502 for_each_file(td, f, i) {
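
file_close_shadow_fds pairs with the EMFILE retry at 595: when open() runs out of descriptors, closing cached shadow descriptors may free enough to retry. A sketch, assuming a per-file shadow_fd of -1 when unused:

    static int file_close_shadow_fds(struct thread_data *td)
    {
            struct fio_file *f;
            int num_closed = 0;
            unsigned int i;

            for_each_file(td, f, i) {
                    if (f->shadow_fd == -1)
                            continue;

                    close(f->shadow_fd);
                    f->shadow_fd = -1;
                    num_closed++;
            }

            /* nonzero means "we freed some fds, worth retrying the open" */
            return num_closed;
    }
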
514 int generic_open_file(struct thread_data *td, struct fio_file *f)
522 if (td_trim(td) && f->filetype != FIO_TYPE_BD) {
528 if (td_rw(td)) {
537 if (td_write(td))
541 if (td_trim(td))
543 if (td->o.odirect)
545 if (td->o.oatomic) {
547 td_verror(td, EINVAL, "OS does not support atomic IO");
552 if (td->o.sync_io)
554 if (td->o.create_on_open)
561 if (td_write(td)) {
572 } else if (td_read(td)) {
582 } else { //td trim
595 if (__e == EMFILE && file_close_shadow_fds(td))
605 td_verror(td, __e, buf);
627 ret = generic_close_file(td, f);
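
The fragments at 522-582 are generic_open_file's flag construction. A sketch with the flag values they presumably gate filled in (OS_O_DIRECT and FIO_O_ATOMIC are fio's portability wrappers; the atomic branch is only valid where the OS check at 547 passes):

    int flags = 0;

    if (td_trim(td) && f->filetype != FIO_TYPE_BD)
            return 1;                       /* trim needs a block device */

    if (td->o.odirect)
            flags |= OS_O_DIRECT;           /* bypass the page cache */
    if (td->o.oatomic)
            flags |= OS_O_DIRECT | FIO_O_ATOMIC;
    if (td->o.sync_io)
            flags |= O_SYNC;
    if (td->o.create_on_open)
            flags |= O_CREAT;

    if (td_write(td)) {
            flags |= O_RDWR;
            if (f->filetype == FIO_TYPE_FILE)
                    flags |= O_CREAT;
            f->fd = open(f->file_name, flags, 0600);
    } else if (td_read(td)) {
            flags |= O_RDONLY;
            f->fd = open(f->file_name, flags);
    } else {
            /* td trim: write access, but never O_CREAT */
            flags |= O_RDWR;
            f->fd = open(f->file_name, flags);
    }
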
636 int generic_get_file_size(struct thread_data *td, struct fio_file *f)
638 return get_file_size(td, f);
644 static int get_file_sizes(struct thread_data *td)
650 for_each_file(td, f, i) {
654 if (td_io_get_file_size(td, f)) {
655 if (td->error != ENOENT) {
656 log_err("%s\n", td->verror);
659 clear_error(td);
662 if (f->real_file_size == -1ULL && td->o.size)
663 f->real_file_size = td->o.size / td->o.nr_files;
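
get_file_sizes (644-663) tolerates ENOENT, since the file may only exist after layout, but fails on any other error; when the engine reports no size it assumes an even share of the job's size option:

    static int get_file_sizes(struct thread_data *td)
    {
            struct fio_file *f;
            unsigned int i;
            int err = 0;

            for_each_file(td, f, i) {
                    if (td_io_get_file_size(td, f)) {
                            if (td->error != ENOENT) {
                                    log_err("%s\n", td->verror);
                                    err = 1;
                                    break;
                            }
                            clear_error(td);        /* ENOENT is fine pre-layout */
                    }

                    /* no size from the engine: assume an even share of o.size */
                    if (f->real_file_size == -1ULL && td->o.size)
                            f->real_file_size = td->o.size / td->o.nr_files;
            }

            return err;
    }
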
679 static unsigned long long get_fs_free_counts(struct thread_data *td)
688 for_each_file(td, f, i) {
745 uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
747 struct thread_options *o = &td->o;
752 return td->o.start_offset +
753 (td->thread_number - 1) * td->o.offset_increment;
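
get_start_offset spaces jobs out: job N starts at start_offset + (N-1) * offset_increment, so with offset_increment=1g four jobs get disjoint 1g-spaced starting points over the same file. Judging by the o local at 747, the elided lines presumably special-case appending writers; a sketch:

    uint64_t get_start_offset(struct thread_data *td, struct fio_file *f)
    {
            struct thread_options *o = &td->o;

            /* assumption: appending writers start at the current EOF */
            if (o->file_append && f->filetype == FIO_TYPE_FILE)
                    return f->real_file_size;

            return td->o.start_offset +
                    (td->thread_number - 1) * td->o.offset_increment;
    }
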
759 int setup_files(struct thread_data *td)
762 struct thread_options *o = &td->o;
767 const unsigned int bs = td_min_bs(td);
772 old_state = td_bump_runstate(td, TD_SETTING_UP);
782 if (td->io_ops->setup)
783 err = td->io_ops->setup(td);
785 err = get_file_sizes(td);
795 for_each_file(td, f, i) {
803 td->fill_device_size = get_fs_free_counts(td);
809 !(td->io_ops->flags & FIO_NOIO) && !o->fill_device &&
812 td_verror(td, EINVAL, "total_file_size");
837 for_each_file(td, f, i) {
838 f->file_offset = get_start_offset(td, f);
867 f->io_size = get_rand_file_size(td)
883 !(td->io_ops->flags & FIO_DISKLESSIO)) {
896 if (o->size < td_min_bs(td)) {
911 for_each_file(td, f, i) {
925 err = extend_file(td, f);
929 err = __file_invalidate_cache(td, f, old_len,
957 td->total_io_size = o->io_limit * o->loops;
959 td->total_io_size = o->size * o->loops;
964 td->done = 1;
966 td_restore_runstate(td, old_state);
971 td_restore_runstate(td, old_state);
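
The tail of setup_files (957-971) fixes the job's total byte budget and restores the runstate on both exits; the done flag at 964 presumably covers the create_only case (an assumption):

    if (o->io_limit)
            td->total_io_size = o->io_limit * o->loops;
    else
            td->total_io_size = o->size * o->loops;

    if (o->create_only)
            td->done = 1;           /* assumption: create_only jobs stop here */

    td_restore_runstate(td, old_state);
    return 0;

    err_out:
    /* the validation failures above (e.g. total_file_size at 812) land here */
    td_restore_runstate(td, old_state);
    return 1;
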
975 int pre_read_files(struct thread_data *td)
982 for_each_file(td, f, i) {
983 pre_read_file(td, f);
989 static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
995 range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);
1000 seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
1001 if (!td->o.rand_repeatable)
1002 seed = td->rand_seeds[4];
1004 if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
1005 zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
1007 pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);
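
__init_rand_distribution carves the file into ranges of the smallest configured block size and seeds one zipf or pareto state per file; hashing the file name keeps runs repeatable per file unless rand_repeatable is off. A sketch, with the elided nranges computation an assumption:

    static int __init_rand_distribution(struct thread_data *td, struct fio_file *f)
    {
            unsigned int range_size, seed;
            unsigned long nranges;

            range_size = min(td->o.min_bs[DDIR_READ], td->o.min_bs[DDIR_WRITE]);

            /* assumption: one range per min-bs unit, rounding up */
            nranges = (f->real_file_size + range_size - 1) / range_size;

            seed = jhash(f->file_name, strlen(f->file_name), 0) * td->thread_number;
            if (!td->o.rand_repeatable)
                    seed = td->rand_seeds[4];

            if (td->o.random_distribution == FIO_RAND_DIST_ZIPF)
                    zipf_init(&f->zipf, nranges, td->o.zipf_theta.u.f, seed);
            else
                    pareto_init(&f->zipf, nranges, td->o.pareto_h.u.f, seed);

            return 1;
    }
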
1012 static int init_rand_distribution(struct thread_data *td)
1018 if (td->o.random_distribution == FIO_RAND_DIST_RANDOM)
1021 state = td_bump_runstate(td, TD_SETTING_UP);
1023 for_each_file(td, f, i)
1024 __init_rand_distribution(td, f);
1026 td_restore_runstate(td, state);
1031 int init_random_map(struct thread_data *td)
1037 if (init_rand_distribution(td))
1039 if (!td_random(td))
1042 for_each_file(td, f, i) {
1045 blocks = file_size / (unsigned long long) td->o.rw_min_bs;
1047 if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
1050 seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];
1054 } else if (!td->o.norandommap) {
1058 } else if (td->o.norandommap)
1061 if (!td->o.softrandommap) {
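
init_random_map ties the generators together: zipf/pareto jobs and sequential jobs need no block map, LFSR jobs get an LFSR sized to the block count, everyone else gets an axmap, and softrandommap downgrades an allocation failure to a warning. A sketch (the lfsr_init and axmap_new signatures are assumptions):

    int init_random_map(struct thread_data *td)
    {
            unsigned long long blocks;
            struct fio_file *f;
            unsigned int i;

            if (init_rand_distribution(td))
                    return 0;       /* zipf/pareto drive the offsets instead */
            if (!td_random(td))
                    return 0;

            for_each_file(td, f, i) {
                    uint64_t file_size = min(f->real_file_size, f->io_size);

                    blocks = file_size / (unsigned long long) td->o.rw_min_bs;

                    if (td->o.random_generator == FIO_RAND_GEN_LFSR) {
                            unsigned long seed = td->rand_seeds[FIO_RAND_BLOCK_OFF];

                            if (!lfsr_init(&f->lfsr, blocks, seed, 0))
                                    continue;
                    } else if (!td->o.norandommap) {
                            f->io_axmap = axmap_new(blocks);
                            if (f->io_axmap)
                                    continue;
                    } else if (td->o.norandommap)
                            continue;

                    /* map setup failed */
                    if (!td->o.softrandommap) {
                            log_err("fio: failed allocating random map; try the "
                                    "norandommap or softrandommap options\n");
                            return 1;
                    }

                    log_info("fio: file %s failed allocating random map, "
                             "running job without one\n", f->file_name);
            }

            return 0;
    }
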
1076 void close_files(struct thread_data *td)
1081 for_each_file(td, f, i) {
1083 td_io_close_file(td, f);
1087 void close_and_free_files(struct thread_data *td)
1094 for_each_file(td, f, i) {
1096 td_io_close_file(td, f);
1100 if (td->o.unlink && f->filetype == FIO_TYPE_FILE) {
1112 td->o.filename = NULL;
1113 free(td->files);
1114 free(td->file_locks);
1115 td->files_index = 0;
1116 td->files = NULL;
1117 td->file_locks = NULL;
1118 td->o.file_lock_mode = FILE_LOCK_NONE;
1119 td->o.nr_files = 0;
1214 static struct fio_file *alloc_new_file(struct thread_data *td)
1227 fio_file_reset(td, f);
1231 int add_file(struct thread_data *td, const char *fname, int numjob, int inc)
1233 int cur_files = td->files_index;
1240 if (td->o.directory)
1241 len = set_name_idx(file_name, td->o.directory, numjob);
1249 f = alloc_new_file(td);
1251 if (td->files_size <= td->files_index) {
1252 unsigned int new_size = td->o.nr_files + 1;
1256 td->files = realloc(td->files, new_size * sizeof(f));
1257 if (td->files == NULL) {
1261 if (td->o.file_lock_mode != FILE_LOCK_NONE) {
1262 td->file_locks = realloc(td->file_locks, new_size);
1263 if (!td->file_locks) {
1267 td->file_locks[cur_files] = FILE_LOCK_NONE;
1269 td->files_size = new_size;
1271 td->files[cur_files] = f;
1277 if (td->io_ops && (td->io_ops->flags & FIO_DISKLESSIO))
1288 switch (td->o.file_lock_mode) {
1298 log_err("fio: unknown lock mode: %d\n", td->o.file_lock_mode);
1302 td->files_index++;
1304 td->nr_normal_files++;
1309 td->o.nr_files++;
1317 int add_file_exclusive(struct thread_data *td, const char *fname)
1322 for_each_file(td, f, i) {
1327 return add_file(td, fname, 0, 1);
1337 int put_file(struct thread_data *td, struct fio_file *f)
1352 if (should_fsync(td) && td->o.fsync_on_close) {
1358 if (td->io_ops->close_file)
1359 ret = td->io_ops->close_file(td, f);
1364 td->nr_open_files--;
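
put_file drops one reference; only the last reference triggers the optional fsync and the engine's close hook. A sketch, with the reference accounting and the fio_file_open/fio_file_clear_open flag helpers (all filtered out here) assumed:

    int put_file(struct thread_data *td, struct fio_file *f)
    {
            int f_ret = 0, ret = 0;

            if (!fio_file_open(f))
                    return 0;

            /* assumption: only the final reference closes the file */
            if (--f->references)
                    return 0;

            if (should_fsync(td) && td->o.fsync_on_close) {
                    f_ret = fsync(f->fd);
                    if (f_ret < 0)
                            f_ret = errno;
            }

            if (td->io_ops->close_file)
                    ret = td->io_ops->close_file(td, f);

            if (!ret)
                    ret = f_ret;

            td->nr_open_files--;
            fio_file_clear_open(f);
            return ret;
    }
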
1370 void lock_file(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir)
1372 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1375 if (td->o.file_lock_mode == FILE_LOCK_READWRITE) {
1380 } else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1383 td->file_locks[f->fileno] = td->o.file_lock_mode;
1386 void unlock_file(struct thread_data *td, struct fio_file *f)
1388 if (!f->lock || td->o.file_lock_mode == FILE_LOCK_NONE)
1391 if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1393 else if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1396 td->file_locks[f->fileno] = FILE_LOCK_NONE;
1399 void unlock_file_all(struct thread_data *td, struct fio_file *f)
1401 if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
1403 if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
1404 unlock_file(td, f);
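
unlock_file_all is complete as listed: it consults the per-file ledger that lock_file writes at 1383 and releases whatever this thread still holds:

    void unlock_file_all(struct thread_data *td, struct fio_file *f)
    {
            if (td->o.file_lock_mode == FILE_LOCK_NONE || !td->file_locks)
                    return;

            /* the ledger records which mode this thread took the lock in */
            if (td->file_locks[f->fileno] != FILE_LOCK_NONE)
                    unlock_file(td, f);
    }
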
1407 static int recurse_dir(struct thread_data *td, const char *dirname)
1418 td_verror(td, errno, buf);
1433 td_verror(td, errno, "stat");
1440 add_file(td, full_path, 0, 1);
1446 ret = recurse_dir(td, full_path);
1455 int add_dir_files(struct thread_data *td, const char *path)
1457 int ret = recurse_dir(td, path);
1460 log_info("fio: opendir added %d files\n", td->o.nr_files);
1465 void dup_files(struct thread_data *td, struct thread_data *org)
1475 td->files = malloc(org->files_index * sizeof(f));
1477 if (td->o.file_lock_mode != FILE_LOCK_NONE)
1478 td->file_locks = malloc(org->files_index);
1483 __f = alloc_new_file(td);
1495 if (td->o.file_lock_mode == FILE_LOCK_EXCLUSIVE)
1497 else if (td->o.file_lock_mode == FILE_LOCK_READWRITE)
1500 td->files[i] = __f;
1507 int get_fileno(struct thread_data *td, const char *fname)
1512 for_each_file(td, f, i)
1522 void free_release_files(struct thread_data *td)
1524 close_files(td);
1525 td->files_index = 0;
1526 td->nr_normal_files = 0;
1529 void fio_file_reset(struct thread_data *td, struct fio_file *f)
1535 if (td->o.random_generator == FIO_RAND_GEN_LFSR)
1536 lfsr_reset(&f->lfsr, td->rand_seeds[FIO_RAND_BLOCK_OFF]);
1539 int fio_files_done(struct thread_data *td)
1544 for_each_file(td, f, i)
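
fio_files_done presumably answers whether every file has been fully exercised; the per-file predicate carries no td and is filtered out, so it is hedged here as a done-flag check:

    int fio_files_done(struct thread_data *td)
    {
            struct fio_file *f;
            unsigned int i;

            for_each_file(td, f, i)
                    if (!fio_file_done(f))  /* assumption: per-file done flag */
                            return 0;

            return 1;
    }
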