/*
 * io_u.c - fio revision ce95d651568bd5494e4c4ddc95832715c6760d56
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"

struct io_completion_data {
	int nr;				/* input */

	int error;			/* output */
	unsigned long bytes_done[2];	/* output */
	struct timeval time;		/* output */
};

/*
 * The ->file_map[] contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const unsigned long long block)
{
	unsigned int idx = RAND_MAP_IDX(f, block);
	unsigned int bit = RAND_MAP_BIT(f, block);

	dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);

	return (f->file_map[idx] & (1UL << bit)) == 0;
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
	unsigned int min_bs = td->o.rw_min_bs;
	struct fio_file *f = io_u->file;
	unsigned long long block;
	unsigned int blocks, nr_blocks;
	int busy_check;

	block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
	nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
	blocks = 0;
	busy_check = !(io_u->flags & IO_U_F_BUSY_OK);

	while (nr_blocks) {
		unsigned int idx, bit;
		unsigned long mask, this_blocks;

		/*
		 * If we have a mixed random workload, we may
		 * encounter blocks we already did IO to.
		 */
		if (!busy_check) {
			blocks = nr_blocks;
			break;
		}
		if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block))
			break;

		idx = RAND_MAP_IDX(f, block);
		bit = RAND_MAP_BIT(f, block);

		fio_assert(td, idx < f->num_maps);

		this_blocks = nr_blocks;
		if (this_blocks + bit > BLOCKS_PER_MAP)
			this_blocks = BLOCKS_PER_MAP - bit;

		do {
			if (this_blocks == BLOCKS_PER_MAP)
				mask = -1UL;
			else
				mask = ((1UL << this_blocks) - 1) << bit;

			if (!(f->file_map[idx] & mask))
				break;

			this_blocks--;
		} while (this_blocks);

		if (!this_blocks)
			break;

		f->file_map[idx] |= mask;
		nr_blocks -= this_blocks;
		blocks += this_blocks;
		block += this_blocks;
	}

	if ((blocks * min_bs) < io_u->buflen)
		io_u->buflen = blocks * min_bs;
}

static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
				     enum fio_ddir ddir)
{
	unsigned long long max_blocks;
	unsigned long long max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

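/*
 * Illustrative sketch (not part of the original file): the ->file_map[]
 * bitmap above is addressed through the RAND_MAP_IDX() and RAND_MAP_BIT()
 * macros from the fio headers. They are assumed to reduce to a plain
 * word/bit split over an array of unsigned longs, roughly like this:
 */
#if 0
#include <limits.h>

#define SKETCH_BLOCKS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

static unsigned int sketch_map_idx(unsigned long long block)
{
	return block / SKETCH_BLOCKS_PER_WORD;	/* which map word */
}

static unsigned int sketch_map_bit(unsigned long long block)
{
	return block % SKETCH_BLOCKS_PER_WORD;	/* which bit in that word */
}
#endif
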
/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, unsigned long long *b)
{
	unsigned long long block, min_bs = td->o.rw_min_bs, lastb;
	int i;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	i = f->last_free_lookup;
	block = i * BLOCKS_PER_MAP;
	while (block * min_bs < f->real_file_size &&
	       block * min_bs < f->io_size) {
		if (f->file_map[i] != -1UL) {
			block += ffz(f->file_map[i]);
			if (block > lastb)
				break;
			f->last_free_lookup = i;
			*b = block;
			return 0;
		}

		block += BLOCKS_PER_MAP;
		i++;
	}

	dprint(FD_IO, "failed finding a free block\n");
	return 1;
}

static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, unsigned long long *b)
{
	unsigned long long rmax, r, lastb;
	int loops = 5;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (f->failed_rands >= 200)
		goto ffz;

	rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
	do {
		if (td->o.use_os_rand)
			r = os_random_long(&td->random_state);
		else
			r = __rand(&td->__random_state);

		*b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));

		dprint(FD_RANDOM, "off rand %llu\n", r);

		/*
		 * if we are not maintaining a random map, we are done.
		 */
		if (!file_randommap(td, f))
			goto ret_good;

		/*
		 * calculate map offset and check if it's free
		 */
		if (random_map_free(f, *b))
			goto ret_good;

		dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
			*b);
	} while (--loops);

	if (!f->failed_rands++)
		f->last_free_lookup = 0;

	/*
	 * we get here if we didn't succeed in looking up a block. generate
	 * a random start offset into the filemap, and find the first free
	 * block from there.
	 */
	loops = 10;
	do {
		f->last_free_lookup = (f->num_maps - 1) *
					(r / ((unsigned long long) rmax + 1.0));
		if (!get_next_free_block(td, f, ddir, b))
			goto ret;

		if (td->o.use_os_rand)
			r = os_random_long(&td->random_state);
		else
			r = __rand(&td->__random_state);
	} while (--loops);

	/*
	 * that didn't work either, try exhaustive search from the start
	 */
	f->last_free_lookup = 0;
ffz:
	if (!get_next_free_block(td, f, ddir, b))
		return 0;
	f->last_free_lookup = 0;
	return get_next_free_block(td, f, ddir, b);
ret_good:
	f->failed_rands = 0;
ret:
	return 0;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, unsigned long long *b)
{
	if (!get_next_rand_offset(td, f, ddir, b))
		return 0;

	if (td->o.time_based) {
		fio_file_reset(f);
		if (!get_next_rand_offset(td, f, ddir, b))
			return 0;
	}

	dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
			f->file_name, f->last_pos, f->real_file_size);
	return 1;
}

static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, unsigned long long *offset)
{
	assert(ddir_rw(ddir));

	if (f->last_pos >= f->io_size + get_start_offset(td) && td->o.time_based)
		f->last_pos = f->last_pos - f->io_size;

	if (f->last_pos < f->real_file_size) {
		unsigned long long pos;

		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
			f->last_pos = f->real_file_size;

		pos = f->last_pos - f->file_offset;
		if (pos)
			pos += td->o.ddir_seq_add;

		*offset = pos;
		return 0;
	}

	return 1;
}

static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq)
{
	struct fio_file *f = io_u->file;
	unsigned long long b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td))
			ret = get_next_rand_block(td, f, ddir, &b);
		else
			ret = get_next_seq_offset(td, f, ddir, &offset);
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret)
				ret = get_next_rand_block(td, f, ddir, &b);
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start != -1ULL)
				offset = f->last_start - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation\n");
			ret = 1;
		}
	}

	return ret;
}

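/*
 * Illustrative sketch (not part of the original file): get_next_rand_offset()
 * above scales a raw PRNG value r in [0, rmax] down to a block index below
 * lastb using floating point. The hypothetical helper below isolates that
 * calculation; it is not used by the real code.
 */
#if 0
static unsigned long long sketch_scale_rand(unsigned long long r,
					    unsigned long long rmax,
					    unsigned long long lastb)
{
	/* r / (rmax + 1.0) lies in [0, 1), so the result stays below lastb */
	return (unsigned long long) ((lastb - 1) * (r / (rmax + 1.0)));
}
#endif
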
/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					io_u->offset, f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					io_u->offset, f->real_file_size);
		return 1;
	}

	return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
	struct prof_io_ops *ops = &td->prof_io_ops;

	if (ops->fill_io_u_off)
		return ops->fill_io_u_off(td, io_u);

	return __get_next_offset(td, io_u);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
			    unsigned int buflen)
{
	struct fio_file *f = io_u->file;

	return io_u->offset + buflen <= f->io_size + get_start_offset(td);
}

static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
	const int ddir = io_u->ddir;
	unsigned int uninitialized_var(buflen);
	unsigned int minbs, maxbs;
	unsigned long r, rand_max;

	assert(ddir_rw(ddir));

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	if (td->o.use_os_rand)
		rand_max = OS_RAND_MAX;
	else
		rand_max = FRAND_MAX;

	do {
		if (td->o.use_os_rand)
			r = os_random_long(&td->bsrange_state);
		else
			r = __rand(&td->__bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (rand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((rand_max / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
	struct prof_io_ops *ops = &td->prof_io_ops;

	if (ops->fill_io_u_size)
		return ops->fill_io_u_size(td, io_u);

	return __get_next_buflen(td, io_u);
}

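/*
 * Worked example (illustrative, not from the original file): with a
 * hypothetical bssplit of 4k/60 and 64k/40, the loop in __get_next_buflen()
 * accumulates perc to 60 and then 100. A draw with r at or below 60% of
 * rand_max picks the 4k entry; larger draws fall through to the 64k entry,
 * which yields roughly the requested 60/40 block size split.
 */
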
static void set_rwmix_bytes(struct thread_data *td)
{
	unsigned int diff;

	/*
	 * we do time or byte based switch. this is needed because
	 * buffered writes may issue a lot quicker than they complete,
	 * whereas reads do not.
	 */
	diff = td->o.rwmix[td->rwmix_ddir ^ 1];
	td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->rwmix_state);
		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__rwmix_state);
		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
	}

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td)) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
		    td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	/*
	 * We are going to sleep; ensure that we flush anything pending
	 * so as not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
	}

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	return ddir;
}

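/*
 * Worked example (illustrative, not from the original file): with a
 * hypothetical rwmix of 60% reads, get_rand_ddir() maps the PRNG draw to
 * v in 1..100. The 60 values 1..60 return DDIR_READ and 61..100 return
 * DDIR_WRITE, so reads are selected about 60% of the time.
 */
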
/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
	enum fio_ddir ddir;

	/*
	 * see if it's time to fsync
	 */
	if (td->o.fsync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC;

	/*
	 * see if it's time to fdatasync
	 */
	if (td->o.fdatasync_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_DATASYNC;

	/*
	 * see if it's time to sync_file_range
	 */
	if (td->sync_file_range_nr &&
	    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
	    td->io_issues[DDIR_WRITE] && should_fsync(td))
		return DDIR_SYNC_FILE_RANGE;

	if (td_rw(td)) {
		/*
		 * Check if it's time to seed a new data direction.
		 */
		if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
			/*
			 * Put a top limit on how many bytes we do for
			 * one data direction, to avoid overflowing the
			 * ranges too much
			 */
			ddir = get_rand_ddir(td);

			if (ddir != td->rwmix_ddir)
				set_rwmix_bytes(td);

			td->rwmix_ddir = ddir;
		}
		ddir = td->rwmix_ddir;
	} else if (td_read(td))
		ddir = DDIR_READ;
	else
		ddir = DDIR_WRITE;

	td->rwmix_ddir = rate_ddir(td, ddir);
	return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
	io_u->ddir = get_rw_ddir(td);

	if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
	    td->o.barrier_blocks &&
	    !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
	    td->io_issues[DDIR_WRITE])
		io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
	int ret = put_file(td, f);

	if (ret)
		td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
	td_io_u_lock(td);

	if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
		put_file_log(td, io_u->file);
	io_u->file = NULL;
	io_u->flags &= ~IO_U_F_FREE_DEF;
	io_u->flags |= IO_U_F_FREE;

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	flist_del_init(&io_u->list);
	flist_add(&io_u->list, &td->io_u_freelist);
	td_io_u_unlock(td);
	td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
	io_u->flags &= ~IO_U_F_FLIGHT;
	put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
		td->io_issues[__io_u->ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	flist_del(&__io_u->list);
	flist_add_tail(&__io_u->list, &td->io_u_requeues);
	td_io_u_unlock(td);
	*io_u = NULL;
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		td->zone_bytes = 0;
		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
		io_u->file->last_pos = io_u->file->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, " off=%llu/%lu > %llu\n", io_u->offset,
				io_u->buflen, io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

	/*
	 * If using a write iolog, store this entry.
	 */
out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	log_io_u(td, io_u);
	return 0;
}

static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
	int idx = 0;

	switch (nr) {
	default:
		idx = 6;
		break;
	case 33 ... 64:
		idx = 5;
		break;
	case 17 ... 32:
		idx = 4;
		break;
	case 9 ... 16:
		idx = 3;
		break;
	case 5 ... 8:
		idx = 2;
		break;
	case 1 ... 4:
		idx = 1;
	case 0:
		break;
	}

	map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_submit, nr);
	td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
	__io_u_mark_map(td->ts.io_u_complete, nr);
	td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
	int idx = 0;

	switch (td->cur_depth) {
	default:
		idx = 6;
		break;
	case 32 ... 63:
		idx = 5;
		break;
	case 16 ... 31:
		idx = 4;
		break;
	case 8 ... 15:
		idx = 3;
		break;
	case 4 ... 7:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 1:
		break;
	}

	td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
	int idx = 0;

	assert(usec < 1000);

	switch (usec) {
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_U_NR);
	td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
	int idx = 0;

	switch (msec) {
	default:
		idx = 11;
		break;
	case 1000 ... 1999:
		idx = 10;
		break;
	case 750 ... 999:
		idx = 9;
		break;
	case 500 ... 749:
		idx = 8;
		break;
	case 250 ... 499:
		idx = 7;
		break;
	case 100 ... 249:
		idx = 6;
		break;
	case 50 ... 99:
		idx = 5;
		break;
	case 20 ... 49:
		idx = 4;
		break;
	case 10 ... 19:
		idx = 3;
		break;
	case 4 ... 9:
		idx = 2;
		break;
	case 2 ... 3:
		idx = 1;
	case 0 ... 1:
		break;
	}

	assert(idx < FIO_IO_U_LAT_M_NR);
	td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
	if (usec < 1000)
		io_u_mark_lat_usec(td, usec);
	else
		io_u_mark_lat_msec(td, usec / 1000);
}

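/*
 * Worked example (illustrative, not from the original file): a completion
 * latency of 180 usec is below 1000, so io_u_mark_latency() hands it to
 * io_u_mark_lat_usec(), where it falls in the 100 ... 249 range and is
 * counted in bucket index 6. A latency of 1800 usec instead goes through
 * io_u_mark_lat_msec(1) and lands in the lowest (0 ... 1 msec) bucket.
 */
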
/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		if (td->o.use_os_rand) {
			r = os_random_long(&td->next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (OS_RAND_MAX + 1.0)));
		} else {
			r = __rand(&td->__next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));
		}

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
					 int badf)
{
	unsigned int old_next_file = td->next_file;
	struct fio_file *f;

	do {
		int opened = 0;

		f = td->files[td->next_file];

		td->next_file++;
		if (td->next_file >= td->o.nr_files)
			td->next_file = 0;

		dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
		if (fio_file_done(f)) {
			f = NULL;
			continue;
		}

		if (!fio_file_open(f)) {
			int err;

			err = td_io_open_file(td, f);
			if (err) {
				dprint(FD_FILE, "error %d on open of %s\n",
					err, f->file_name);
				f = NULL;
				continue;
			}
			opened = 1;
		}

		dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
				f->flags);
		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
			break;

		if (opened)
			td_io_close_file(td, f);

		f = NULL;
	} while (td->next_file != old_next_file);

	dprint(FD_FILE, "get_next_file_rr: %p\n", f);
	return f;
}

static struct fio_file *__get_next_file(struct thread_data *td)
{
	struct fio_file *f;

	assert(td->o.nr_files <= td->files_index);

	if (td->nr_done_files >= td->o.nr_files) {
		dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
				" nr_files=%d\n", td->nr_open_files,
				td->nr_done_files, td->o.nr_files);
		return NULL;
	}

	f = td->file_service_file;
	if (f && fio_file_open(f) && !fio_file_closing(f)) {
		if (td->o.file_service_type == FIO_FSERVICE_SEQ)
			goto out;
		if (td->file_service_left--)
			goto out;
	}

	if (td->o.file_service_type == FIO_FSERVICE_RR ||
	    td->o.file_service_type == FIO_FSERVICE_SEQ)
		f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
	else
		f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

	td->file_service_file = f;
	td->file_service_left = td->file_service_nr - 1;
out:
	dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
	return f;
}

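/*
 * Note (added commentary, not from the original file): get_next_file() below,
 * like get_next_offset() and get_next_buflen() earlier, first consults
 * td->prof_io_ops. A profile can install its own hook there and take over
 * file, offset or block size selection; the __-prefixed default is only used
 * when no hook is set.
 */
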
static struct fio_file *get_next_file(struct thread_data *td)
{
	struct prof_io_ops *ops = &td->prof_io_ops;

	if (ops->get_next_file)
		return ops->get_next_file(td);

	return __get_next_file(td);
}

static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f;

	do {
		f = get_next_file(td);
		if (!f)
			return 1;

		io_u->file = f;
		get_file(f);

		if (!fill_io_u(td, io_u))
			break;

		put_file_log(td, f);
		td_io_close_file(td, f);
		io_u->file = NULL;
		fio_file_set_done(f);
		td->nr_done_files++;
		dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
			td->nr_done_files, td->o.nr_files);
	} while (1);

	return 0;
}

struct io_u *__get_io_u(struct thread_data *td)
{
	struct io_u *io_u = NULL;

	td_io_u_lock(td);

again:
	if (!flist_empty(&td->io_u_requeues))
		io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
	else if (!queue_full(td)) {
		io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);

		io_u->buflen = 0;
		io_u->resid = 0;
		io_u->file = NULL;
		io_u->end_io = NULL;
	}

	if (io_u) {
		assert(io_u->flags & IO_U_F_FREE);
		io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
		io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);
		io_u->flags &= ~IO_U_F_VER_LIST;

		io_u->error = 0;
		flist_del(&io_u->list);
		flist_add(&io_u->list, &td->io_u_busylist);
		td->cur_depth++;
		io_u->flags |= IO_U_F_IN_CUR_DEPTH;
	} else if (td->o.verify_async) {
		/*
		 * We ran out, wait for async verify threads to finish and
		 * return one
		 */
		pthread_cond_wait(&td->free_cond, &td->io_u_lock);
		goto again;
	}

	td_io_u_unlock(td);
	return io_u;
}

static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.trim_backlog && td->trim_entries) {
		int get_trim = 0;

		if (td->trim_batch) {
			td->trim_batch--;
			get_trim = 1;
		} else if (!(td->io_hist_len % td->o.trim_backlog) &&
			   td->last_ddir != DDIR_READ) {
			td->trim_batch = td->o.trim_batch;
			if (!td->trim_batch)
				td->trim_batch = td->o.trim_backlog;
			get_trim = 1;
		}

		if (get_trim && !get_next_trim(td, io_u))
			return 1;
	}

	return 0;
}

static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.verify_backlog && td->io_hist_len) {
		int get_verify = 0;

		if (td->verify_batch)
			get_verify = 1;
		else if (!(td->io_hist_len % td->o.verify_backlog) &&
			 td->last_ddir != DDIR_READ) {
			td->verify_batch = td->o.verify_batch;
			if (!td->verify_batch)
				td->verify_batch = td->o.verify_backlog;
			get_verify = 1;
		}

		if (get_verify && !get_next_verify(td, io_u)) {
			td->verify_batch--;
			return 1;
		}
	}

	return 0;
}

/*
 * Fill the offset and start time into the buffer content, so the data is
 * not trivially compressible and does not fall to simple de-dupe attempts.
 * Do this for every 512b block in the range, since that should be the
 * smallest block size we can expect from a device.
 */
static void small_content_scramble(struct io_u *io_u)
{
	unsigned int i, nr_blocks = io_u->buflen / 512;
	unsigned long long boffset;
	unsigned int offset;
	void *p, *end;

	if (!nr_blocks)
		return;

	p = io_u->xfer_buf;
	boffset = io_u->offset;
	io_u->buf_filled_len = 0;

	for (i = 0; i < nr_blocks; i++) {
		/*
		 * Fill the byte offset into a "random" start offset of
		 * the buffer, derived from the usec time xor'ed with
		 * the actual offset.
		 */
		offset = (io_u->start_time.tv_usec ^ boffset) & 511;
		offset &= ~(sizeof(unsigned long long) - 1);
		if (offset >= 512 - sizeof(unsigned long long))
			offset -= sizeof(unsigned long long);
		memcpy(p + offset, &boffset, sizeof(boffset));

		end = p + 512 - sizeof(io_u->start_time);
		memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
		p += 512;
		boffset += 512;
	}
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->o.read_iolog_file) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start = io_u->offset;
		f->last_pos = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->o.refill_buffers) {
				io_u_fill_buffer(td, io_u,
					io_u->xfer_buflen, io_u->xfer_buflen);
			} else if (td->o.scramble_buffers)
				do_scramble = 1;
			if (td->o.verify != VERIFY_NONE) {
				populate_verify_io_u(td, io_u);
				do_scramble = 0;
			}
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_slat)
			fio_gettime(&io_u->start_time, NULL);
		if (do_scramble)
			small_content_scramble(io_u);
		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return NULL;
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
	const char *msg[] = { "read", "write", "sync", "datasync",
				"sync_file_range", "wait", "trim" };

	log_err("fio: io_u error");

	if (io_u->file)
		log_err(" on file %s", io_u->file->file_name);

	log_err(": %s\n", strerror(io_u->error));

	log_err(" %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
		io_u->offset, io_u->xfer_buflen);

	if (!td->error)
		td_verror(td, io_u->error, "io_u error");
}

static void account_io_completion(struct thread_data *td, struct io_u *io_u,
				  struct io_completion_data *icd,
				  const enum fio_ddir idx, unsigned int bytes)
{
	unsigned long uninitialized_var(lusec);

	if (!td->o.disable_clat || !td->o.disable_bw)
		lusec = utime_since(&io_u->issue_time, &icd->time);

	if (!td->o.disable_lat) {
		unsigned long tusec;

		tusec = utime_since(&io_u->start_time, &icd->time);
		add_lat_sample(td, idx, tusec, bytes);
	}

	if (!td->o.disable_clat) {
		add_clat_sample(td, idx, lusec, bytes);
		io_u_mark_latency(td, lusec);
	}

	if (!td->o.disable_bw)
		add_bw_sample(td, idx, bytes, &icd->time);

	add_iops_sample(td, idx, &icd->time);
}

static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned long long secs, remainder, bps, bytes;

	bytes = td->this_io_bytes[ddir];
	bps = td->rate_bps[ddir];
	secs = bytes / bps;
	remainder = bytes % bps;
	return remainder * 1000000 / bps + secs * 1000000;
}

static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	/*
	 * Older gcc's are too dumb to realize that usec is always used
	 * initialized, silence that warning.
	 */
	unsigned long uninitialized_var(usec);
	struct fio_file *f;

	dprint_io_u(io_u, "io complete");

	td_io_u_lock(td);
	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
	td_io_u_unlock(td);

	if (ddir_sync(io_u->ddir)) {
		td->last_was_sync = 1;
		f = io_u->file;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		return;
	}

	td->last_was_sync = 0;
	td->last_ddir = io_u->ddir;

	if (!io_u->error && ddir_rw(io_u->ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir idx = io_u->ddir;
		const enum fio_ddir odx = io_u->ddir ^ 1;
		int ret;

		td->io_blocks[idx]++;
		td->this_io_blocks[idx]++;
		td->io_bytes[idx] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST))
			td->this_io_bytes[idx] += bytes;

		if (idx == DDIR_WRITE) {
			f = io_u->file;
			if (f) {
				if (f->first_write == -1ULL ||
				    io_u->offset < f->first_write)
					f->first_write = io_u->offset;
				if (f->last_write == -1ULL ||
				    ((io_u->offset + bytes) > f->last_write))
					f->last_write = io_u->offset + bytes;
			}
		}

		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
					   td->runstate == TD_VERIFYING)) {
			account_io_completion(td, io_u, icd, idx, bytes);

			if (__should_check_rate(td, idx)) {
				td->rate_pending_usleep[idx] =
					(usec_for_io(td, idx) -
					 utime_since_now(&td->start));
			}
			if (__should_check_rate(td, odx))
				td->rate_pending_usleep[odx] =
					(usec_for_io(td, odx) -
					 utime_since_now(&td->start));
		}

		if (td_write(td) && idx == DDIR_WRITE &&
		    td->o.do_verify &&
		    td->o.verify != VERIFY_NONE)
			log_io_piece(td, io_u);

		icd->bytes_done[idx] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u);
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error && td_non_fatal_error(icd->error) &&
	    (td->o.continue_on_error & td_error_type(io_u->ddir, icd->error))) {
		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		io_u->error = 0;
	}
}

static void init_icd(struct thread_data *td, struct io_completion_data *icd,
		     int nr)
{
	if (!td->o.disable_clat || !td->o.disable_bw)
		fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	icd->bytes_done[0] = icd->bytes_done[1] = 0;
}

static void ios_completed(struct thread_data *td,
			  struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, io_u, icd);

		if (!(io_u->flags & IO_U_F_FREE_DEF))
			put_io_u(td, io_u);
	}
}

/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
		       unsigned long *bytes)
{
	struct io_completion_data icd;

	init_icd(td, &icd, 1);
	io_completed(td, io_u, &icd);

	if (!(io_u->flags & IO_U_F_FREE_DEF))
		put_io_u(td, io_u);

	if (icd.error) {
		td_verror(td, icd.error, "io_u_sync_complete");
		return -1;
	}

	if (bytes) {
		bytes[0] += icd.bytes_done[0];
		bytes[1] += icd.bytes_done[1];
	}

	return 0;
}

/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts,
			 unsigned long *bytes)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);

	if (!min_evts)
		tvp = &ts;

	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	if (bytes) {
		bytes[0] += icd.bytes_done[0];
		bytes[1] += icd.bytes_done[1];
	}

	return 0;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat) {
		unsigned long slat_time;

		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
	}
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned int min_write, unsigned int max_bs)
{
	io_u->buf_filled_len = 0;

	if (!td->o.zero_buffers) {
		unsigned int perc = td->o.compress_percentage;

		if (perc) {
			unsigned int seg = min_write;

			seg = min(min_write, td->o.compress_chunk);
			fill_random_buf_percentage(&td->buf_state, io_u->buf,
						   perc, seg, max_bs);
		} else
			fill_random_buf(&td->buf_state, io_u->buf, max_bs);
	} else
		memset(io_u->buf, 0, max_bs);
}
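
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * consumer of this file's API. submit_one() is a hypothetical stand-in for
 * the engine submission path (td_io_queue() and friends in the real code);
 * the other calls are defined in this translation unit.
 */
#if 0
static int example_issue_one(struct thread_data *td)
{
	unsigned long bytes_done[2] = { 0, 0 };
	struct io_u *io_u;

	io_u = get_io_u(td);		/* direction, offset and buflen set */
	if (!io_u)
		return 1;

	if (submit_one(td, io_u))	/* hypothetical submission hook */
		return 1;

	io_u_queued(td, io_u);		/* record submission latency */

	/* sync-style completion; async engines use io_u_queued_complete() */
	return io_u_sync_complete(td, io_u, bytes_done);
}
#endif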