io_u.c revision 068420271828b3b2426ffc3ccf64404cb9d340fb
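/*
 * Helpers for filling out and completing io units (io_u): offset and
 * buffer length selection, data direction choice, io_u lifecycle
 * (get/put/requeue) and completion accounting.
 */
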
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"
#include "hash.h"
#include "verify.h"
#include "trim.h"
#include "lib/rand.h"

struct io_completion_data {
        int nr;                         /* input */
        int account;                    /* input */

        int error;                      /* output */
        unsigned long bytes_done[2];    /* output */
        struct timeval time;            /* output */
};

/*
 * The ->file_map[] contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const unsigned long long block)
{
        unsigned int idx = RAND_MAP_IDX(f, block);
        unsigned int bit = RAND_MAP_BIT(f, block);

        dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);

        return (f->file_map[idx] & (1UL << bit)) == 0;
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
        unsigned int min_bs = td->o.rw_min_bs;
        struct fio_file *f = io_u->file;
        unsigned long long block;
        unsigned int blocks, nr_blocks;
        int busy_check;

        block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
        nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;
        blocks = 0;
        busy_check = !(io_u->flags & IO_U_F_BUSY_OK);

        while (nr_blocks) {
                unsigned int idx, bit;
                unsigned long mask, this_blocks;

                /*
                 * If we have a mixed random workload, we may
                 * encounter blocks we already did IO to.
                 */
                if (!busy_check) {
                        blocks = nr_blocks;
                        break;
                }
                if ((td->o.ddir_seq_nr == 1) && !random_map_free(f, block))
                        break;

                idx = RAND_MAP_IDX(f, block);
                bit = RAND_MAP_BIT(f, block);

                fio_assert(td, idx < f->num_maps);

                this_blocks = nr_blocks;
                if (this_blocks + bit > BLOCKS_PER_MAP)
                        this_blocks = BLOCKS_PER_MAP - bit;

                do {
                        if (this_blocks == BLOCKS_PER_MAP)
                                mask = -1UL;
                        else
                                mask = ((1UL << this_blocks) - 1) << bit;

                        if (!(f->file_map[idx] & mask))
                                break;

                        this_blocks--;
                } while (this_blocks);

                if (!this_blocks)
                        break;

                f->file_map[idx] |= mask;
                nr_blocks -= this_blocks;
                blocks += this_blocks;
                block += this_blocks;
        }

        if ((blocks * min_bs) < io_u->buflen)
                io_u->buflen = blocks * min_bs;
}

static unsigned long long last_block(struct thread_data *td, struct fio_file *f,
                                     enum fio_ddir ddir)
{
        unsigned long long max_blocks;
        unsigned long long max_size;

        assert(ddir_rw(ddir));

        /*
         * Hmm, should we make sure that ->io_size <= ->real_file_size?
         */
        max_size = f->io_size;
        if (max_size > f->real_file_size)
                max_size = f->real_file_size;

        max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
        if (!max_blocks)
                return 0;

        return max_blocks;
}

/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long block, min_bs = td->o.rw_min_bs, lastb;
        int i;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        i = f->last_free_lookup;
        block = i * BLOCKS_PER_MAP;
        while (block * min_bs < f->real_file_size &&
                block * min_bs < f->io_size) {
                if (f->file_map[i] != -1UL) {
                        block += ffz(f->file_map[i]);
                        if (block > lastb)
                                break;
                        f->last_free_lookup = i;
                        *b = block;
                        return 0;
                }

                block += BLOCKS_PER_MAP;
                i++;
        }

        dprint(FD_IO, "failed finding a free block\n");
        return 1;
}

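/*
 * Pick a random block. Retry a handful of times if the chosen block is
 * already busy in the random map; after too many failed picks in a row
 * (200+), skip straight to scanning the map for the first free block.
 */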
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
                                enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long rmax, r, lastb;
        int loops = 5;

        lastb = last_block(td, f, ddir);
        if (!lastb)
                return 1;

        if (f->failed_rands >= 200)
                goto ffz;

        rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
        do {
                if (td->o.use_os_rand)
                        r = os_random_long(&td->random_state);
                else
                        r = __rand(&td->__random_state);

                *b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));

                dprint(FD_RANDOM, "off rand %llu\n", r);

                /*
                 * if we are not maintaining a random map, we are done.
                 */
                if (!file_randommap(td, f))
                        goto ret_good;

                /*
                 * calculate map offset and check if it's free
                 */
                if (random_map_free(f, *b))
                        goto ret_good;

                dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
                                                                        *b);
        } while (--loops);

        if (!f->failed_rands++)
                f->last_free_lookup = 0;

        /*
         * we get here, if we didn't succeed in looking up a block. generate
         * a random start offset into the filemap, and find the first free
         * block from there.
         */
        loops = 10;
        do {
                f->last_free_lookup = (f->num_maps - 1) *
                                        (r / ((unsigned long long) rmax + 1.0));
                if (!get_next_free_block(td, f, ddir, b))
                        goto ret;

                if (td->o.use_os_rand)
                        r = os_random_long(&td->random_state);
                else
                        r = __rand(&td->__random_state);
        } while (--loops);

        /*
         * that didn't work either, try exhaustive search from the start
         */
        f->last_free_lookup = 0;
ffz:
        if (!get_next_free_block(td, f, ddir, b))
                return 0;
        f->last_free_lookup = 0;
        return get_next_free_block(td, f, ddir, b);
ret_good:
        f->failed_rands = 0;
ret:
        return 0;
}

static int get_next_rand_block(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, unsigned long long *b)
{
        if (get_next_rand_offset(td, f, ddir, b)) {
                dprint(FD_IO, "%s: rand offset failed, last=%llu, size=%llu\n",
                                f->file_name, f->last_pos, f->real_file_size);
                return 1;
        }

        return 0;
}

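/*
 * Sequential io: the next block follows the last position in the file,
 * plus any ddir_seq_add increment configured for holed sequential io.
 */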
static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
                              enum fio_ddir ddir, unsigned long long *b)
{
        assert(ddir_rw(ddir));

        if (f->last_pos < f->real_file_size) {
                unsigned long long pos;

                if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
                        f->last_pos = f->real_file_size;

                pos = f->last_pos - f->file_offset;
                if (pos)
                        pos += td->o.ddir_seq_add;

                *b = pos / td->o.min_bs[ddir];
                return 0;
        }

        return 1;
}

static int get_next_block(struct thread_data *td, struct io_u *io_u,
                          enum fio_ddir ddir, int rw_seq, unsigned long long *b)
{
        struct fio_file *f = io_u->file;
        int ret;

        assert(ddir_rw(ddir));

        if (rw_seq) {
                if (td_random(td))
                        ret = get_next_rand_block(td, f, ddir, b);
                else
                        ret = get_next_seq_block(td, f, ddir, b);
        } else {
                io_u->flags |= IO_U_F_BUSY_OK;

                if (td->o.rw_seq == RW_SEQ_SEQ) {
                        ret = get_next_seq_block(td, f, ddir, b);
                        if (ret)
                                ret = get_next_rand_block(td, f, ddir, b);
                } else if (td->o.rw_seq == RW_SEQ_IDENT) {
                        if (f->last_start != -1ULL)
                                *b = (f->last_start - f->file_offset)
                                        / td->o.min_bs[ddir];
                        else
                                *b = 0;
                        ret = 0;
                } else {
                        log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
                        ret = 1;
                }
        }

        return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        unsigned long long b;
        enum fio_ddir ddir = io_u->ddir;
        int rw_seq_hit = 0;

        assert(ddir_rw(ddir));

        if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
                rw_seq_hit = 1;
                td->ddir_seq_nr = td->o.ddir_seq_nr;
        }

        if (get_next_block(td, io_u, ddir, rw_seq_hit, &b))
                return 1;

        io_u->offset = b * td->o.ba[ddir];
        if (io_u->offset >= f->io_size) {
                dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
                                        io_u->offset, f->io_size);
                return 1;
        }

        io_u->offset += f->file_offset;
        if (io_u->offset >= f->real_file_size) {
                dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
                                        io_u->offset, f->real_file_size);
                return 1;
        }

        return 0;
}

static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
        struct prof_io_ops *ops = &td->prof_io_ops;

        if (ops->fill_io_u_off)
                return ops->fill_io_u_off(td, io_u);

        return __get_next_offset(td, io_u);
}

static inline int io_u_fits(struct thread_data *td, struct io_u *io_u,
                            unsigned int buflen)
{
        struct fio_file *f = io_u->file;

        return io_u->offset + buflen <= f->io_size + td->o.start_offset;
}

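/*
 * Pick the next buffer length: fixed if min and max block size match,
 * otherwise drawn at random from that range or from the bssplit
 * distribution, rounded to a multiple of the minimum block size unless
 * unaligned io is allowed. Loops until the length fits in the file.
 */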
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
        const int ddir = io_u->ddir;
        unsigned int uninitialized_var(buflen);
        unsigned int minbs, maxbs;
        unsigned long r, rand_max;

        assert(ddir_rw(ddir));

        minbs = td->o.min_bs[ddir];
        maxbs = td->o.max_bs[ddir];

        if (minbs == maxbs)
                return minbs;

        if (td->o.use_os_rand)
                rand_max = OS_RAND_MAX;
        else
                rand_max = FRAND_MAX;

        do {
                if (td->o.use_os_rand)
                        r = os_random_long(&td->bsrange_state);
                else
                        r = __rand(&td->__bsrange_state);

                if (!td->o.bssplit_nr[ddir]) {
                        buflen = 1 + (unsigned int) ((double) maxbs *
                                        (r / (rand_max + 1.0)));
                        if (buflen < minbs)
                                buflen = minbs;
                } else {
                        long perc = 0;
                        unsigned int i;

                        for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
                                struct bssplit *bsp = &td->o.bssplit[ddir][i];

                                buflen = bsp->bs;
                                perc += bsp->perc;
                                if ((r <= ((rand_max / 100L) * perc)) &&
                                    io_u_fits(td, io_u, buflen))
                                        break;
                        }
                }

                if (!td->o.bs_unaligned && is_power_of_2(minbs))
                        buflen = (buflen + minbs - 1) & ~(minbs - 1);

        } while (!io_u_fits(td, io_u, buflen));

        return buflen;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
        struct prof_io_ops *ops = &td->prof_io_ops;

        if (ops->fill_io_u_size)
                return ops->fill_io_u_size(td, io_u);

        return __get_next_buflen(td, io_u);
}

static void set_rwmix_bytes(struct thread_data *td)
{
        unsigned int diff;

        /*
         * we do time or byte based switch. this is needed because
         * buffered writes may issue a lot quicker than they complete,
         * whereas reads do not.
         */
        diff = td->o.rwmix[td->rwmix_ddir ^ 1];
        td->rwmix_issues = (td->io_issues[td->rwmix_ddir] * diff) / 100;
}

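/*
 * Roll a number in [1,100] and compare it against the configured read
 * percentage to pick a direction: with rwmix[DDIR_READ] at 70, rolls
 * of 1-70 become reads and the rest writes.
 */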
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
        unsigned int v;
        unsigned long r;

        if (td->o.use_os_rand) {
                r = os_random_long(&td->rwmix_state);
                v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
        } else {
                r = __rand(&td->__rwmix_state);
                v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
        }

        if (v <= td->o.rwmix[DDIR_READ])
                return DDIR_READ;

        return DDIR_WRITE;
}

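/*
 * Rate limiting built up pending sleep in this direction. For mixed
 * workloads, switch to the other direction if its debt is still small;
 * otherwise sleep off the smaller of the two debts (flushing queued io
 * first, so the sleep does not skew latencies) and deduct the elapsed
 * time from the pending counters.
 */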
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
        enum fio_ddir odir = ddir ^ 1;
        struct timeval t;
        long usec;

        assert(ddir_rw(ddir));

        if (td->rate_pending_usleep[ddir] <= 0)
                return ddir;

        /*
         * We have too much pending sleep in this direction. See if we
         * should switch.
         */
        if (td_rw(td)) {
                /*
                 * Other direction does not have too much pending, switch
                 */
                if (td->rate_pending_usleep[odir] < 100000)
                        return odir;

                /*
                 * Both directions have pending sleep. Sleep the minimum time
                 * and deduct from both.
                 */
                if (td->rate_pending_usleep[ddir] <=
                        td->rate_pending_usleep[odir]) {
                        usec = td->rate_pending_usleep[ddir];
                } else {
                        usec = td->rate_pending_usleep[odir];
                        ddir = odir;
                }
        } else
                usec = td->rate_pending_usleep[ddir];

        /*
         * We are going to sleep, ensure that we flush anything pending as
         * not to skew our latency numbers
         */
        if (td->cur_depth) {
                int fio_unused ret;

                ret = io_u_queued_complete(td, td->cur_depth, NULL);
        }

        fio_gettime(&t, NULL);
        usec_sleep(td, usec);
        usec = utime_since_now(&t);

        td->rate_pending_usleep[ddir] -= usec;

        odir = ddir ^ 1;
        if (td_rw(td) && __should_check_rate(td, odir))
                td->rate_pending_usleep[odir] -= usec;

        return ddir;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
        enum fio_ddir ddir;

        /*
         * see if it's time to fsync
         */
        if (td->o.fsync_blocks &&
           !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                return DDIR_SYNC;

        /*
         * see if it's time to fdatasync
         */
        if (td->o.fdatasync_blocks &&
           !(td->io_issues[DDIR_WRITE] % td->o.fdatasync_blocks) &&
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                return DDIR_DATASYNC;

        /*
         * see if it's time to sync_file_range
         */
        if (td->sync_file_range_nr &&
           !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr) &&
             td->io_issues[DDIR_WRITE] && should_fsync(td))
                return DDIR_SYNC_FILE_RANGE;

        if (td_rw(td)) {
                /*
                 * Check if it's time to seed a new data direction.
                 */
                if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
                        /*
                         * Put a top limit on how many bytes we do for
                         * one data direction, to avoid overflowing the
                         * ranges too much
                         */
                        ddir = get_rand_ddir(td);

                        if (ddir != td->rwmix_ddir)
                                set_rwmix_bytes(td);

                        td->rwmix_ddir = ddir;
                }
                ddir = td->rwmix_ddir;
        } else if (td_read(td))
                ddir = DDIR_READ;
        else
                ddir = DDIR_WRITE;

        td->rwmix_ddir = rate_ddir(td, ddir);
        return td->rwmix_ddir;
}

static void set_rw_ddir(struct thread_data *td, struct io_u *io_u)
{
        io_u->ddir = get_rw_ddir(td);

        if (io_u->ddir == DDIR_WRITE && (td->io_ops->flags & FIO_BARRIER) &&
            td->o.barrier_blocks &&
           !(td->io_issues[DDIR_WRITE] % td->o.barrier_blocks) &&
             td->io_issues[DDIR_WRITE])
                io_u->flags |= IO_U_F_BARRIER;
}

void put_file_log(struct thread_data *td, struct fio_file *f)
{
        int ret = put_file(td, f);

        if (ret)
                td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
        td_io_u_lock(td);

        if (io_u->file && !(io_u->flags & IO_U_F_FREE_DEF))
                put_file_log(td, io_u->file);
        io_u->file = NULL;
        io_u->flags &= ~IO_U_F_FREE_DEF;
        io_u->flags |= IO_U_F_FREE;

        if (io_u->flags & IO_U_F_IN_CUR_DEPTH)
                td->cur_depth--;
        flist_del_init(&io_u->list);
        flist_add(&io_u->list, &td->io_u_freelist);
        td_io_u_unlock(td);
        td_io_u_free_notify(td);
}

void clear_io_u(struct thread_data *td, struct io_u *io_u)
{
        io_u->flags &= ~IO_U_F_FLIGHT;
        put_io_u(td, io_u);
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
        struct io_u *__io_u = *io_u;

        dprint(FD_IO, "requeue %p\n", __io_u);

        td_io_u_lock(td);

        __io_u->flags |= IO_U_F_FREE;
        if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
                td->io_issues[__io_u->ddir]--;

        __io_u->flags &= ~IO_U_F_FLIGHT;
        if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
                td->cur_depth--;
        flist_del(&__io_u->list);
        flist_add_tail(&__io_u->list, &td->io_u_requeues);
        td_io_u_unlock(td);
        *io_u = NULL;
}

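/*
 * Fill out the basics of an io_u: data direction, and for regular
 * read/write io also the offset and buffer length. Handles zone
 * switching, and marks the random map before the io_u is possibly
 * trimmed down to fit.
 */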
static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
        if (td->io_ops->flags & FIO_NOIO)
                goto out;

        set_rw_ddir(td, io_u);

        /*
         * fsync() or fdatasync() or trim etc, we are done
         */
        if (!ddir_rw(io_u->ddir))
                goto out;

        /*
         * See if it's time to switch to a new zone
         */
        if (td->zone_bytes >= td->o.zone_size) {
                td->zone_bytes = 0;
                io_u->file->last_pos += td->o.zone_skip;
                td->io_skip_bytes += td->o.zone_skip;
        }

        /*
         * No log, let the seq/rand engine retrieve the next buflen and
         * position.
         */
        if (get_next_offset(td, io_u)) {
                dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
                return 1;
        }

        io_u->buflen = get_next_buflen(td, io_u);
        if (!io_u->buflen) {
                dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
                return 1;
        }

        if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
                dprint(FD_IO, "io_u %p, offset too large\n", io_u);
                dprint(FD_IO, "  off=%llu/%lu > %llu\n", io_u->offset,
                                io_u->buflen, io_u->file->real_file_size);
                return 1;
        }

        /*
         * mark entry before potentially trimming io_u
         */
        if (td_random(td) && file_randommap(td, io_u->file))
                mark_random_map(td, io_u);

        /*
         * If using a write iolog, store this entry.
         */
out:
        dprint_io_u(io_u, "fill_io_u");
        td->zone_bytes += io_u->buflen;
        log_io_u(td, io_u);
        return 0;
}

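/*
 * Bucket a submit/complete batch size into the histogram slots used
 * for the end-of-run statistics: 0, 1-4, 5-8, 9-16, 17-32, 33-64 and
 * above. A batch of 12, for example, lands in the 9-16 slot.
 */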
static void __io_u_mark_map(unsigned int *map, unsigned int nr)
{
        int idx = 0;

        switch (nr) {
        default:
                idx = 6;
                break;
        case 33 ... 64:
                idx = 5;
                break;
        case 17 ... 32:
                idx = 4;
                break;
        case 9 ... 16:
                idx = 3;
                break;
        case 5 ... 8:
                idx = 2;
                break;
        case 1 ... 4:
                idx = 1;
        case 0:
                break;
        }

        map[idx]++;
}

void io_u_mark_submit(struct thread_data *td, unsigned int nr)
{
        __io_u_mark_map(td->ts.io_u_submit, nr);
        td->ts.total_submit++;
}

void io_u_mark_complete(struct thread_data *td, unsigned int nr)
{
        __io_u_mark_map(td->ts.io_u_complete, nr);
        td->ts.total_complete++;
}

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
        int idx = 0;

        switch (td->cur_depth) {
        default:
                idx = 6;
                break;
        case 32 ... 63:
                idx = 5;
                break;
        case 16 ... 31:
                idx = 4;
                break;
        case 8 ... 15:
                idx = 3;
                break;
        case 4 ... 7:
                idx = 2;
                break;
        case 2 ... 3:
                idx = 1;
        case 1:
                break;
        }

        td->ts.io_u_map[idx] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
        int idx = 0;

        assert(usec < 1000);

        switch (usec) {
        case 750 ... 999:
                idx = 9;
                break;
        case 500 ... 749:
                idx = 8;
                break;
        case 250 ... 499:
                idx = 7;
                break;
        case 100 ... 249:
                idx = 6;
                break;
        case 50 ... 99:
                idx = 5;
                break;
        case 20 ... 49:
                idx = 4;
                break;
        case 10 ... 19:
                idx = 3;
                break;
        case 4 ... 9:
                idx = 2;
                break;
        case 2 ... 3:
                idx = 1;
        case 0 ... 1:
                break;
        }

        assert(idx < FIO_IO_U_LAT_U_NR);
        td->ts.io_u_lat_u[idx]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
        int idx = 0;

        switch (msec) {
        default:
                idx = 11;
                break;
        case 1000 ... 1999:
                idx = 10;
                break;
        case 750 ... 999:
                idx = 9;
                break;
        case 500 ... 749:
                idx = 8;
                break;
        case 250 ... 499:
                idx = 7;
                break;
        case 100 ... 249:
                idx = 6;
                break;
        case 50 ... 99:
                idx = 5;
                break;
        case 20 ... 49:
                idx = 4;
                break;
        case 10 ... 19:
                idx = 3;
                break;
        case 4 ... 9:
                idx = 2;
                break;
        case 2 ... 3:
                idx = 1;
        case 0 ... 1:
                break;
        }

        assert(idx < FIO_IO_U_LAT_M_NR);
        td->ts.io_u_lat_m[idx]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
        if (usec < 1000)
                io_u_mark_lat_usec(td, usec);
        else
                io_u_mark_lat_msec(td, usec / 1000);
}

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
                                           enum fio_file_flags goodf,
                                           enum fio_file_flags badf)
{
        struct fio_file *f;
        int fno;

        do {
                int opened = 0;
                unsigned long r;

                if (td->o.use_os_rand) {
                        r = os_random_long(&td->next_file_state);
                        fno = (unsigned int) ((double) td->o.nr_files
                                * (r / (OS_RAND_MAX + 1.0)));
                } else {
                        r = __rand(&td->__next_file_state);
                        fno = (unsigned int) ((double) td->o.nr_files
                                * (r / (FRAND_MAX + 1.0)));
                }

                f = td->files[fno];
                if (fio_file_done(f))
                        continue;

                if (!fio_file_open(f)) {
                        int err;

                        err = td_io_open_file(td, f);
                        if (err)
                                continue;
                        opened = 1;
                }

                if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
                        dprint(FD_FILE, "get_next_file_rand: %p\n", f);
                        return f;
                }
                if (opened)
                        td_io_close_file(td, f);
        } while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
                                         int badf)
{
        unsigned int old_next_file = td->next_file;
        struct fio_file *f;

        do {
                int opened = 0;

                f = td->files[td->next_file];

                td->next_file++;
                if (td->next_file >= td->o.nr_files)
                        td->next_file = 0;

                dprint(FD_FILE, "trying file %s %x\n", f->file_name, f->flags);
                if (fio_file_done(f)) {
                        f = NULL;
                        continue;
                }

                if (!fio_file_open(f)) {
                        int err;

                        err = td_io_open_file(td, f);
                        if (err) {
                                dprint(FD_FILE, "error %d on open of %s\n",
                                        err, f->file_name);
                                f = NULL;
                                continue;
                        }
                        opened = 1;
                }

                dprint(FD_FILE, "goodf=%x, badf=%x, ff=%x\n", goodf, badf,
                                                                f->flags);
                if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
                        break;

                if (opened)
                        td_io_close_file(td, f);

                f = NULL;
        } while (td->next_file != old_next_file);

        dprint(FD_FILE, "get_next_file_rr: %p\n", f);
        return f;
}

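/*
 * Pick the next file to service, honoring the configured file service
 * type. The current service file is reused until it has received its
 * share of ios (or, for sequential service, for as long as it stays
 * open), after which the round robin or random chooser supplies a new
 * one.
 */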
static struct fio_file *__get_next_file(struct thread_data *td)
{
        struct fio_file *f;

        assert(td->o.nr_files <= td->files_index);

        if (td->nr_done_files >= td->o.nr_files) {
                dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
                                " nr_files=%d\n", td->nr_open_files,
                                                  td->nr_done_files,
                                                  td->o.nr_files);
                return NULL;
        }

        f = td->file_service_file;
        if (f && fio_file_open(f) && !fio_file_closing(f)) {
                if (td->o.file_service_type == FIO_FSERVICE_SEQ)
                        goto out;
                if (td->file_service_left--)
                        goto out;
        }

        if (td->o.file_service_type == FIO_FSERVICE_RR ||
            td->o.file_service_type == FIO_FSERVICE_SEQ)
                f = get_next_file_rr(td, FIO_FILE_open, FIO_FILE_closing);
        else
                f = get_next_file_rand(td, FIO_FILE_open, FIO_FILE_closing);

        td->file_service_file = f;
        td->file_service_left = td->file_service_nr - 1;
out:
        dprint(FD_FILE, "get_next_file: %p [%s]\n", f, f->file_name);
        return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
        struct prof_io_ops *ops = &td->prof_io_ops;

        if (ops->get_next_file)
                return ops->get_next_file(td);

        return __get_next_file(td);
}

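/*
 * Attach the next file to the io_u and fill it out. A file whose range
 * is exhausted is closed and marked done, and the search moves on to
 * the next one.
 */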
static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f;

        do {
                f = get_next_file(td);
                if (!f)
                        return 1;

                io_u->file = f;
                get_file(f);

                if (!fill_io_u(td, io_u))
                        break;

                put_file_log(td, f);
                td_io_close_file(td, f);
                io_u->file = NULL;
                fio_file_set_done(f);
                td->nr_done_files++;
                dprint(FD_FILE, "%s: is done (%d of %d)\n", f->file_name,
                                        td->nr_done_files, td->o.nr_files);
        } while (1);

        return 0;
}

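/*
 * Grab an io_u from the requeue list first, then from the free list.
 * If both are empty and async verify is in use, wait for the verify
 * threads to hand one back.
 */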
struct io_u *__get_io_u(struct thread_data *td)
{
        struct io_u *io_u = NULL;

        td_io_u_lock(td);

again:
        if (!flist_empty(&td->io_u_requeues))
                io_u = flist_entry(td->io_u_requeues.next, struct io_u, list);
        else if (!queue_full(td)) {
                io_u = flist_entry(td->io_u_freelist.next, struct io_u, list);

                io_u->buflen = 0;
                io_u->resid = 0;
                io_u->file = NULL;
                io_u->end_io = NULL;
        }

        if (io_u) {
                assert(io_u->flags & IO_U_F_FREE);
                io_u->flags &= ~(IO_U_F_FREE | IO_U_F_FREE_DEF);
                io_u->flags &= ~(IO_U_F_TRIMMED | IO_U_F_BARRIER);

                io_u->error = 0;
                flist_del(&io_u->list);
                flist_add(&io_u->list, &td->io_u_busylist);
                td->cur_depth++;
                io_u->flags |= IO_U_F_IN_CUR_DEPTH;
        } else if (td->o.verify_async) {
                /*
                 * We ran out, wait for async verify threads to finish and
                 * return one
                 */
                pthread_cond_wait(&td->free_cond, &td->io_u_lock);
                goto again;
        }

        td_io_u_unlock(td);
        return io_u;
}

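/*
 * See if the trim backlog warrants turning this io_u into a trim of a
 * previously written range; returns 1 if the io_u was set up as a
 * trim. check_get_verify() below does the same for the verify backlog.
 */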
static int check_get_trim(struct thread_data *td, struct io_u *io_u)
{
        if (td->o.trim_backlog && td->trim_entries) {
                int get_trim = 0;

                if (td->trim_batch) {
                        td->trim_batch--;
                        get_trim = 1;
                } else if (!(td->io_hist_len % td->o.trim_backlog) &&
                         td->last_ddir != DDIR_READ) {
                        td->trim_batch = td->o.trim_batch;
                        if (!td->trim_batch)
                                td->trim_batch = td->o.trim_backlog;
                        get_trim = 1;
                }

                if (get_trim && !get_next_trim(td, io_u))
                        return 1;
        }

        return 0;
}

static int check_get_verify(struct thread_data *td, struct io_u *io_u)
{
        if (td->o.verify_backlog && td->io_hist_len) {
                int get_verify = 0;

                if (td->verify_batch) {
                        td->verify_batch--;
                        get_verify = 1;
                } else if (!(td->io_hist_len % td->o.verify_backlog) &&
                         td->last_ddir != DDIR_READ) {
                        td->verify_batch = td->o.verify_batch;
                        if (!td->verify_batch)
                                td->verify_batch = td->o.verify_backlog;
                        get_verify = 1;
                }

                if (get_verify && !get_next_verify(td, io_u))
                        return 1;
        }

        return 0;
}

/*
 * Fill offset and start time into the buffer content, to prevent too
 * easy compressible data for simple de-dupe attempts. Do this for every
 * 512b block in the range, since that should be the smallest block size
 * we can expect from a device.
 */
static void small_content_scramble(struct io_u *io_u)
{
        unsigned int i, nr_blocks = io_u->buflen / 512;
        unsigned long long boffset;
        unsigned int offset;
        void *p, *end;

        if (!nr_blocks)
                return;

        p = io_u->xfer_buf;
        boffset = io_u->offset;

        for (i = 0; i < nr_blocks; i++) {
                /*
                 * Fill the byte offset into a "random" start offset of
                 * the buffer, given by the product of the usec time
                 * and the actual offset.
                 */
                offset = (io_u->start_time.tv_usec ^ boffset) & 511;
                offset &= ~(sizeof(unsigned long long) - 1);
                if (offset >= 512 - sizeof(unsigned long long))
                        offset -= sizeof(unsigned long long);
                memcpy(p + offset, &boffset, sizeof(boffset));

                end = p + 512 - sizeof(io_u->start_time);
                memcpy(end, &io_u->start_time, sizeof(io_u->start_time));
                p += 512;
                boffset += 512;
        }
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
        struct fio_file *f;
        struct io_u *io_u;
        int do_scramble = 0;

        io_u = __get_io_u(td);
        if (!io_u) {
                dprint(FD_IO, "__get_io_u failed\n");
                return NULL;
        }

        if (check_get_verify(td, io_u))
                goto out;
        if (check_get_trim(td, io_u))
                goto out;

        /*
         * from a requeue, io_u already setup
         */
        if (io_u->file)
                goto out;

        /*
         * If using an iolog, grab next piece if any available.
         */
        if (td->o.read_iolog_file) {
                if (read_iolog_get(td, io_u))
                        goto err_put;
        } else if (set_io_u_file(td, io_u)) {
                dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
                goto err_put;
        }

        f = io_u->file;
        assert(fio_file_open(f));

        if (ddir_rw(io_u->ddir)) {
                if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
                        dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
                        goto err_put;
                }

                f->last_start = io_u->offset;
                f->last_pos = io_u->offset + io_u->buflen;

                if (io_u->ddir == DDIR_WRITE) {
                        if (td->o.verify != VERIFY_NONE)
                                populate_verify_io_u(td, io_u);
                        else if (td->o.refill_buffers)
                                io_u_fill_buffer(td, io_u, io_u->xfer_buflen);
                        else if (td->o.scramble_buffers)
                                do_scramble = 1;
                } else if (io_u->ddir == DDIR_READ) {
                        /*
                         * Reset the buf_filled parameters so next time if the
                         * buffer is used for writes it is refilled.
                         */
                        io_u->buf_filled_len = 0;
                }
        }

        /*
         * Set io data pointers.
         */
        io_u->xfer_buf = io_u->buf;
        io_u->xfer_buflen = io_u->buflen;

out:
        assert(io_u->file);
        if (!td_io_prep(td, io_u)) {
                if (!td->o.disable_slat)
                        fio_gettime(&io_u->start_time, NULL);
                if (do_scramble)
                        small_content_scramble(io_u);
                return io_u;
        }
err_put:
        dprint(FD_IO, "get_io_u failed\n");
        put_io_u(td, io_u);
        return NULL;
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
        const char *msg[] = { "read", "write", "sync", "datasync",
                                "sync_file_range", "wait", "trim" };

        log_err("fio: io_u error");

        if (io_u->file)
                log_err(" on file %s", io_u->file->file_name);

        log_err(": %s\n", strerror(io_u->error));

        log_err("     %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
                                        io_u->offset, io_u->xfer_buflen);

        if (!td->error)
                td_verror(td, io_u->error, "io_u error");
}

static void account_io_completion(struct thread_data *td, struct io_u *io_u,
                                  struct io_completion_data *icd,
                                  const enum fio_ddir idx, unsigned int bytes)
{
        unsigned long uninitialized_var(lusec);

        if (!icd->account)
                return;

        if (!td->o.disable_clat || !td->o.disable_bw)
                lusec = utime_since(&io_u->issue_time, &icd->time);

        if (!td->o.disable_lat) {
                unsigned long tusec;

                tusec = utime_since(&io_u->start_time, &icd->time);
                add_lat_sample(td, idx, tusec, bytes);
        }

        if (!td->o.disable_clat) {
                add_clat_sample(td, idx, lusec, bytes);
                io_u_mark_latency(td, lusec);
        }

        if (!td->o.disable_bw)
                add_bw_sample(td, idx, bytes, &icd->time);

        add_iops_sample(td, idx, &icd->time);
}

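/*
 * How long the io done so far in this direction should have taken at
 * the requested rate, in usecs. The difference from the actual elapsed
 * time becomes the pending rate sleep.
 */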
static long long usec_for_io(struct thread_data *td, enum fio_ddir ddir)
{
        unsigned long long secs, remainder, bps, bytes;

        bytes = td->this_io_bytes[ddir];
        bps = td->rate_bps[ddir];
        secs = bytes / bps;
        remainder = bytes % bps;
        return remainder * 1000000 / bps + secs * 1000000;
}

static void io_completed(struct thread_data *td, struct io_u *io_u,
                         struct io_completion_data *icd)
{
        /*
         * Older gcc's are too dumb to realize that usec is always used
         * initialized, silence that warning.
         */
        unsigned long uninitialized_var(usec);
        struct fio_file *f;

        dprint_io_u(io_u, "io complete");

        td_io_u_lock(td);
        assert(io_u->flags & IO_U_F_FLIGHT);
        io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
        td_io_u_unlock(td);

        if (ddir_sync(io_u->ddir)) {
                td->last_was_sync = 1;
                f = io_u->file;
                if (f) {
                        f->first_write = -1ULL;
                        f->last_write = -1ULL;
                }
                return;
        }

        td->last_was_sync = 0;
        td->last_ddir = io_u->ddir;

        if (!io_u->error && ddir_rw(io_u->ddir)) {
                unsigned int bytes = io_u->buflen - io_u->resid;
                const enum fio_ddir idx = io_u->ddir;
                const enum fio_ddir odx = io_u->ddir ^ 1;
                int ret;

                td->io_blocks[idx]++;
                td->this_io_blocks[idx]++;
                td->io_bytes[idx] += bytes;
                td->this_io_bytes[idx] += bytes;

                if (idx == DDIR_WRITE) {
                        f = io_u->file;
                        if (f) {
                                if (f->first_write == -1ULL ||
                                    io_u->offset < f->first_write)
                                        f->first_write = io_u->offset;
                                if (f->last_write == -1ULL ||
                                    ((io_u->offset + bytes) > f->last_write))
                                        f->last_write = io_u->offset + bytes;
                        }
                }

                if (ramp_time_over(td) && td->runstate == TD_RUNNING) {
                        account_io_completion(td, io_u, icd, idx, bytes);

                        if (__should_check_rate(td, idx)) {
                                td->rate_pending_usleep[idx] =
                                        (usec_for_io(td, idx) -
                                         utime_since_now(&td->start));
                        }
                        if (__should_check_rate(td, odx))
                                td->rate_pending_usleep[odx] =
                                        (usec_for_io(td, odx) -
                                         utime_since_now(&td->start));
                }

                if (td_write(td) && idx == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE)
                        log_io_piece(td, io_u);

                icd->bytes_done[idx] += bytes;

                if (io_u->end_io) {
                        ret = io_u->end_io(td, io_u);
                        if (ret && !icd->error)
                                icd->error = ret;
                }
        } else if (io_u->error) {
                icd->error = io_u->error;
                io_u_log_error(td, io_u);
        }
        if (icd->error && td_non_fatal_error(icd->error) &&
            (td->o.continue_on_error & td_error_type(io_u->ddir, icd->error))) {
                /*
                 * If there is a non_fatal error, then add to the error count
                 * and clear all the errors.
                 */
                update_error_count(td, icd->error);
                td_clear_error(td);
                icd->error = 0;
                io_u->error = 0;
        }
}

static void init_icd(struct thread_data *td, struct io_completion_data *icd,
                     int nr)
{
        if (!td->o.disable_clat || !td->o.disable_bw)
                fio_gettime(&icd->time, NULL);

        icd->nr = nr;
        icd->account = 1;

        icd->error = 0;
        icd->bytes_done[0] = icd->bytes_done[1] = 0;
}

static void ios_completed(struct thread_data *td,
                          struct io_completion_data *icd)
{
        struct io_u *io_u;
        int i;

        for (i = 0; i < icd->nr; i++) {
                io_u = td->io_ops->event(td, i);

                io_completed(td, io_u, icd);

                if (!(io_u->flags & IO_U_F_FREE_DEF))
                        put_io_u(td, io_u);

                icd->account = 0;
        }
}

/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
                       unsigned long *bytes)
{
        struct io_completion_data icd;

        init_icd(td, &icd, 1);
        io_completed(td, io_u, &icd);

        if (!(io_u->flags & IO_U_F_FREE_DEF))
                put_io_u(td, io_u);

        if (icd.error) {
                td_verror(td, icd.error, "io_u_sync_complete");
                return -1;
        }

        if (bytes) {
                bytes[0] += icd.bytes_done[0];
                bytes[1] += icd.bytes_done[1];
        }

        return 0;
}

/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts,
                         unsigned long *bytes)
{
        struct io_completion_data icd;
        struct timespec *tvp = NULL;
        int ret;
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

        dprint(FD_IO, "io_u_queued_completed: min=%d\n", min_evts);

        if (!min_evts)
                tvp = &ts;

        ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
        if (ret < 0) {
                td_verror(td, -ret, "td_io_getevents");
                return ret;
        } else if (!ret)
                return ret;

        init_icd(td, &icd, ret);
        ios_completed(td, &icd);
        if (icd.error) {
                td_verror(td, icd.error, "io_u_queued_complete");
                return -1;
        }

        if (bytes) {
                bytes[0] += icd.bytes_done[0];
                bytes[1] += icd.bytes_done[1];
        }

        return 0;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
        if (!td->o.disable_slat) {
                unsigned long slat_time;

                slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
                add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
        }
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
                      unsigned int max_bs)
{
        io_u->buf_filled_len = 0;

        if (!td->o.zero_buffers)
                fill_random_buf(&td->buf_state, io_u->buf, max_bs);
        else
                memset(io_u->buf, 0, max_bs);
}