io_u.c revision 60f2c658b923afdd491d556e15a655584b9db306
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <assert.h>

#include "fio.h"

/*
 * Change this define to play with the timeout handling
 */
#undef FIO_USE_TIMEOUT

struct io_completion_data {
        int nr;                         /* input */

        int error;                      /* output */
        unsigned long bytes_done[2];    /* output */
        struct timeval time;            /* output */
};

/*
 * The ->file_map[] contains a map of blocks we have or have not done io
 * to yet. Used to make sure we cover the entire range in a fair fashion.
 */
static int random_map_free(struct fio_file *f, const unsigned long long block)
{
        unsigned int idx = RAND_MAP_IDX(f, block);
        unsigned int bit = RAND_MAP_BIT(f, block);

        dprint(FD_RANDOM, "free: b=%llu, idx=%u, bit=%u\n", block, idx, bit);

        return (f->file_map[idx] & (1UL << bit)) == 0;
}

/*
 * Mark a given offset as used in the map.
 */
static void mark_random_map(struct thread_data *td, struct io_u *io_u)
{
        unsigned int min_bs = td->o.rw_min_bs;
        struct fio_file *f = io_u->file;
        unsigned long long block;
        unsigned int blocks;
        unsigned int nr_blocks;

        block = (io_u->offset - f->file_offset) / (unsigned long long) min_bs;
        blocks = 0;
        nr_blocks = (io_u->buflen + min_bs - 1) / min_bs;

        while (blocks < nr_blocks) {
                unsigned int idx, bit;

                /*
                 * If we have a mixed random workload, we may
                 * encounter blocks we already did IO to.
                 */
                if ((td->o.ddir_nr == 1) && !random_map_free(f, block))
                        break;

                idx = RAND_MAP_IDX(f, block);
                bit = RAND_MAP_BIT(f, block);

                fio_assert(td, idx < f->num_maps);

                f->file_map[idx] |= (1UL << bit);
                block++;
                blocks++;
        }

        if ((blocks * min_bs) < io_u->buflen)
                io_u->buflen = blocks * min_bs;
}

static inline unsigned long long last_block(struct thread_data *td,
                                            struct fio_file *f,
                                            enum fio_ddir ddir)
{
        unsigned long long max_blocks;

        max_blocks = f->io_size / (unsigned long long) td->o.min_bs[ddir];
        if (!max_blocks)
                return 0;

        return max_blocks;
}

/*
 * Return the next free block in the map.
 */
static int get_next_free_block(struct thread_data *td, struct fio_file *f,
                               enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long min_bs = td->o.rw_min_bs;
        int i;

        i = f->last_free_lookup;
        *b = (i * BLOCKS_PER_MAP);
        while ((*b) * min_bs < f->real_file_size) {
                if (f->file_map[i] != -1UL) {
                        *b += fio_ffz(f->file_map[i]);
                        if (*b > last_block(td, f, ddir))
                                break;
                        f->last_free_lookup = i;
                        return 0;
                }

                *b += BLOCKS_PER_MAP;
                i++;
        }

        dprint(FD_IO, "failed finding a free block\n");
        return 1;
}

static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
                                enum fio_ddir ddir, unsigned long long *b)
{
        unsigned long long r;
        int loops = 5;

        do {
                r = os_random_long(&td->random_state);
                dprint(FD_RANDOM, "off rand %llu\n", r);
                *b = (last_block(td, f, ddir) - 1)
                        * (r / ((unsigned long long) RAND_MAX + 1.0));

                /*
                 * if we are not maintaining a random map, we are done.
                 */
                if (!file_randommap(td, f))
                        return 0;

                /*
                 * calculate map offset and check if it's free
                 */
                if (random_map_free(f, *b))
                        return 0;

                dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
                                                                        *b);
        } while (--loops);

        /*
         * we get here if we didn't succeed in looking up a block. Generate
         * a random start offset into the filemap, and find the first free
         * block from there.
         */
        loops = 10;
        do {
                f->last_free_lookup = (f->num_maps - 1) * (r / (RAND_MAX+1.0));
                if (!get_next_free_block(td, f, ddir, b))
                        return 0;

                r = os_random_long(&td->random_state);
        } while (--loops);

        /*
         * that didn't work either, try exhaustive search from the start
         */
        f->last_free_lookup = 0;
        return get_next_free_block(td, f, ddir, b);
}
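
/*
 * Worked example for the scaling in get_next_rand_offset() (illustrative
 * numbers, assuming os_random_long() returns a value in [0, RAND_MAX]):
 * with a 1 GiB io_size and 4 KiB min_bs, last_block() is 262144. Since
 * r / (RAND_MAX + 1.0) is a fraction in [0, 1), the product is always
 * strictly below last_block(), so the offset can never land past the end
 * of the file. If the chosen block is already set in ->file_map[], we
 * retry up to 5 times, then fall back to a randomly seeded linear scan,
 * and finally to an exhaustive scan from the start of the map.
 */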

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int get_next_offset(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        unsigned long long b;
        enum fio_ddir ddir = io_u->ddir;

        if (td_random(td) && (td->o.ddir_nr && !--td->ddir_nr)) {
                td->ddir_nr = td->o.ddir_nr;

                if (get_next_rand_offset(td, f, ddir, &b))
                        return 1;
        } else {
                if (f->last_pos >= f->real_file_size) {
                        if (!td_random(td) ||
                             get_next_rand_offset(td, f, ddir, &b))
                                return 1;
                } else
                        b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
        }

        io_u->offset = b * td->o.min_bs[ddir];
        if (io_u->offset >= f->io_size) {
                dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
                                        io_u->offset, f->io_size);
                return 1;
        }

        io_u->offset += f->file_offset;
        if (io_u->offset >= f->real_file_size) {
                dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
                                        io_u->offset, f->real_file_size);
                return 1;
        }

        return 0;
}

static unsigned int get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
        const int ddir = io_u->ddir;
        unsigned int buflen;
        long r;

        if (td->o.min_bs[ddir] == td->o.max_bs[ddir])
                buflen = td->o.min_bs[ddir];
        else {
                r = os_random_long(&td->bsrange_state);
                if (!td->o.bssplit_nr) {
                        buflen = (unsigned int)
                                        (1 + (double) (td->o.max_bs[ddir] - 1)
                                        * r / (RAND_MAX + 1.0));
                } else {
                        long perc = 0;
                        unsigned int i;

                        for (i = 0; i < td->o.bssplit_nr; i++) {
                                struct bssplit *bsp = &td->o.bssplit[i];

                                buflen = bsp->bs;
                                perc += bsp->perc;
                                if (r <= ((LONG_MAX / 100L) * perc))
                                        break;
                        }
                }
                if (!td->o.bs_unaligned) {
                        buflen = (buflen + td->o.min_bs[ddir] - 1)
                                        & ~(td->o.min_bs[ddir] - 1);
                }
        }

        if (io_u->offset + buflen > io_u->file->real_file_size) {
                dprint(FD_IO, "lower buflen %u -> %u (ddir=%d)\n", buflen,
                                                td->o.min_bs[ddir], ddir);
                buflen = td->o.min_bs[ddir];
        }

        return buflen;
}
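
/*
 * Illustrative bssplit walk-through for get_next_buflen() (example values,
 * not from this revision): with "bssplit=4k/60:64k/40", the loop first
 * accumulates perc = 60, then perc = 100. Assuming r spans [0, LONG_MAX],
 * as the comparison implies, a 4k buflen is picked whenever
 * r <= (LONG_MAX / 100) * 60, i.e. roughly 60% of the time, and 64k
 * otherwise. Unless bs_unaligned is set, the result is then rounded up
 * to a multiple of min_bs.
 */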

static void set_rwmix_bytes(struct thread_data *td)
{
        unsigned long issues;
        unsigned int diff;

        /*
         * we base the next switch point on issues, not completions. this
         * is needed because buffered writes may issue a lot quicker than
         * they complete, whereas reads do not.
         */
        issues = td->io_issues[td->rwmix_ddir] - td->rwmix_issues;
        diff = td->o.rwmix[td->rwmix_ddir ^ 1];

        td->rwmix_issues = td->io_issues[td->rwmix_ddir]
                                + (issues * ((100 - diff)) / diff);
}

static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
        unsigned int v;
        long r;

        r = os_random_long(&td->rwmix_state);
        v = 1 + (int) (100.0 * (r / (RAND_MAX + 1.0)));
        if (v < td->o.rwmix[DDIR_READ])
                return DDIR_READ;

        return DDIR_WRITE;
}

/*
 * Return the data direction for the next io_u. If the job is a
 * mixed read/write workload, check the rwmix cycle and switch if
 * necessary.
 */
static enum fio_ddir get_rw_ddir(struct thread_data *td)
{
        if (td_rw(td)) {
                /*
                 * Check if it's time to seed a new data direction.
                 */
                if (td->io_issues[td->rwmix_ddir] >= td->rwmix_issues) {
                        unsigned long long max_bytes;
                        enum fio_ddir ddir;

                        /*
                         * Put a top limit on how many bytes we do for
                         * one data direction, to avoid overflowing the
                         * ranges too much
                         */
                        ddir = get_rand_ddir(td);
                        max_bytes = td->this_io_bytes[ddir];
                        if (max_bytes >=
                            (td->o.size * td->o.rwmix[ddir] / 100)) {
                                if (!td->rw_end_set[ddir])
                                        td->rw_end_set[ddir] = 1;

                                ddir ^= 1;
                        }

                        if (ddir != td->rwmix_ddir)
                                set_rwmix_bytes(td);

                        td->rwmix_ddir = ddir;
                }
                return td->rwmix_ddir;
        } else if (td_read(td))
                return DDIR_READ;
        else
                return DDIR_WRITE;
}
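
/*
 * Example of the mix selection above (illustrative): get_rand_ddir()
 * maps the random value to v in [1, 100]. With rwmix read=80/write=20,
 * v < 80 picks DDIR_READ, i.e. roughly the configured 80% of switch
 * decisions. The byte cap in get_rw_ddir() then forces the opposite
 * direction once one side has consumed its rwmix share of td->o.size.
 */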

static void put_file_log(struct thread_data *td, struct fio_file *f)
{
        int ret = put_file(td, f);

        if (ret)
                td_verror(td, ret, "file close");
}

void put_io_u(struct thread_data *td, struct io_u *io_u)
{
        assert((io_u->flags & IO_U_F_FREE) == 0);
        io_u->flags |= IO_U_F_FREE;

        if (io_u->file)
                put_file_log(td, io_u->file);

        io_u->file = NULL;
        list_del(&io_u->list);
        list_add(&io_u->list, &td->io_u_freelist);
        td->cur_depth--;
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
        struct io_u *__io_u = *io_u;

        __io_u->flags |= IO_U_F_FREE;
        if ((__io_u->flags & IO_U_F_FLIGHT) && (__io_u->ddir != DDIR_SYNC))
                td->io_issues[__io_u->ddir]--;

        __io_u->flags &= ~IO_U_F_FLIGHT;

        list_del(&__io_u->list);
        list_add_tail(&__io_u->list, &td->io_u_requeues);
        td->cur_depth--;
        *io_u = NULL;
}

static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
        if (td->io_ops->flags & FIO_NOIO)
                goto out;

        /*
         * see if it's time to sync
         */
        if (td->o.fsync_blocks &&
           !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks) &&
             td->io_issues[DDIR_WRITE] && should_fsync(td)) {
                io_u->ddir = DDIR_SYNC;
                goto out;
        }

        io_u->ddir = get_rw_ddir(td);

        /*
         * See if it's time to switch to a new zone
         */
        if (td->zone_bytes >= td->o.zone_size) {
                td->zone_bytes = 0;
                io_u->file->last_pos += td->o.zone_skip;
                td->io_skip_bytes += td->o.zone_skip;
        }

        /*
         * No log, let the seq/rand engine retrieve the next buflen and
         * position.
         */
        if (get_next_offset(td, io_u)) {
                dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
                return 1;
        }

        io_u->buflen = get_next_buflen(td, io_u);
        if (!io_u->buflen) {
                dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
                return 1;
        }

        if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
                dprint(FD_IO, "io_u %p, offset too large\n", io_u);
                dprint(FD_IO, "  off=%llu/%lu > %llu\n", io_u->offset,
                                io_u->buflen, io_u->file->real_file_size);
                return 1;
        }

        /*
         * mark entry before potentially trimming io_u
         */
        if (td_random(td) && file_randommap(td, io_u->file))
                mark_random_map(td, io_u);

        /*
         * If using a write iolog, store this entry.
         */
out:
        dprint_io_u(io_u, "fill_io_u");
        td->zone_bytes += io_u->buflen;
        log_io_u(td, io_u);
        return 0;
}
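
/*
 * Sync cadence example for fill_io_u() (illustrative): with
 * fsync_blocks=64, once the write issue count reaches a positive
 * multiple of 64, the next io_u is turned into a DDIR_SYNC (subject to
 * should_fsync()), so a streak of writes gets fsynced roughly once per
 * 64 issues.
 */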

void io_u_mark_depth(struct thread_data *td, unsigned int nr)
{
        int index = 0;

        switch (td->cur_depth) {
        default:
                index = 6;
                break;
        case 32 ... 63:
                index = 5;
                break;
        case 16 ... 31:
                index = 4;
                break;
        case 8 ... 15:
                index = 3;
                break;
        case 4 ... 7:
                index = 2;
                break;
        case 2 ... 3:
                index = 1;
                /* fall through */
        case 1:
                break;
        }

        td->ts.io_u_map[index] += nr;
}

static void io_u_mark_lat_usec(struct thread_data *td, unsigned long usec)
{
        int index = 0;

        assert(usec < 1000);

        switch (usec) {
        case 750 ... 999:
                index = 9;
                break;
        case 500 ... 749:
                index = 8;
                break;
        case 250 ... 499:
                index = 7;
                break;
        case 100 ... 249:
                index = 6;
                break;
        case 50 ... 99:
                index = 5;
                break;
        case 20 ... 49:
                index = 4;
                break;
        case 10 ... 19:
                index = 3;
                break;
        case 4 ... 9:
                index = 2;
                break;
        case 2 ... 3:
                index = 1;
                /* fall through */
        case 0 ... 1:
                break;
        }

        assert(index < FIO_IO_U_LAT_U_NR);
        td->ts.io_u_lat_u[index]++;
}

static void io_u_mark_lat_msec(struct thread_data *td, unsigned long msec)
{
        int index = 0;

        switch (msec) {
        default:
                index = 11;
                break;
        case 1000 ... 1999:
                index = 10;
                break;
        case 750 ... 999:
                index = 9;
                break;
        case 500 ... 749:
                index = 8;
                break;
        case 250 ... 499:
                index = 7;
                break;
        case 100 ... 249:
                index = 6;
                break;
        case 50 ... 99:
                index = 5;
                break;
        case 20 ... 49:
                index = 4;
                break;
        case 10 ... 19:
                index = 3;
                break;
        case 4 ... 9:
                index = 2;
                break;
        case 2 ... 3:
                index = 1;
                /* fall through */
        case 0 ... 1:
                break;
        }

        assert(index < FIO_IO_U_LAT_M_NR);
        td->ts.io_u_lat_m[index]++;
}

static void io_u_mark_latency(struct thread_data *td, unsigned long usec)
{
        if (usec < 1000)
                io_u_mark_lat_usec(td, usec);
        else
                io_u_mark_lat_msec(td, usec / 1000);
}
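
/*
 * Bucketing example (illustrative): a completion latency of 2300 usec
 * is routed to io_u_mark_lat_msec(td, 2) and counted in the 2 ... 3
 * msec bucket (index 1), while 180 usec stays in io_u_mark_lat_usec()
 * and lands in the 100 ... 249 usec bucket (index 6).
 */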

/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td, int goodf,
                                           int badf)
{
        struct fio_file *f;
        int fno;

        do {
                long r = os_random_long(&td->next_file_state);

                fno = (unsigned int) ((double) td->o.nr_files
                        * (r / (RAND_MAX + 1.0)));
                f = td->files[fno];
                if (f->flags & FIO_FILE_DONE)
                        continue;

                if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
                        dprint(FD_FILE, "get_next_file_rand: %p\n", f);
                        return f;
                }
        } while (1);
}

/*
 * Get next file to service by doing round robin between all available ones
 */
static struct fio_file *get_next_file_rr(struct thread_data *td, int goodf,
                                         int badf)
{
        unsigned int old_next_file = td->next_file;
        struct fio_file *f;

        do {
                f = td->files[td->next_file];

                td->next_file++;
                if (td->next_file >= td->o.nr_files)
                        td->next_file = 0;

                if (f->flags & FIO_FILE_DONE) {
                        f = NULL;
                        continue;
                }

                if ((!goodf || (f->flags & goodf)) && !(f->flags & badf))
                        break;

                f = NULL;
        } while (td->next_file != old_next_file);

        dprint(FD_FILE, "get_next_file_rr: %p\n", f);
        return f;
}

static struct fio_file *get_next_file(struct thread_data *td)
{
        struct fio_file *f;

        assert(td->o.nr_files <= td->files_index);

        if (!td->nr_open_files || td->nr_done_files >= td->o.nr_files) {
                dprint(FD_FILE, "get_next_file: nr_open=%d, nr_done=%d,"
                                " nr_files=%d\n", td->nr_open_files,
                                                  td->nr_done_files,
                                                  td->o.nr_files);
                return NULL;
        }

        f = td->file_service_file;
        if (f && (f->flags & FIO_FILE_OPEN) && td->file_service_left--)
                goto out;

        if (td->o.file_service_type == FIO_FSERVICE_RR)
                f = get_next_file_rr(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);
        else
                f = get_next_file_rand(td, FIO_FILE_OPEN, FIO_FILE_CLOSING);

        td->file_service_file = f;
        td->file_service_left = td->file_service_nr - 1;
out:
        dprint(FD_FILE, "get_next_file: %p\n", f);
        return f;
}

static struct fio_file *find_next_new_file(struct thread_data *td)
{
        struct fio_file *f;

        if (!td->nr_open_files || td->nr_done_files >= td->o.nr_files)
                return NULL;

        if (td->o.file_service_type == FIO_FSERVICE_RR)
                f = get_next_file_rr(td, 0, FIO_FILE_OPEN);
        else
                f = get_next_file_rand(td, 0, FIO_FILE_OPEN);

        return f;
}

static int set_io_u_file(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f;

        do {
                f = get_next_file(td);
                if (!f)
                        return 1;

set_file:
                io_u->file = f;
                get_file(f);

                if (!fill_io_u(td, io_u))
                        break;

                /*
                 * optimization to prevent close/open of the same file. This
                 * way we preserve queueing etc.
                 */
                if (td->o.nr_files == 1 && td->o.time_based) {
                        put_file_log(td, f);
                        fio_file_reset(f);
                        goto set_file;
                }

                /*
                 * td_io_close_file() does a put_file() as well, so no need
                 * to do that here.
                 */
                io_u->file = NULL;
                td_io_close_file(td, f);
                f->flags |= FIO_FILE_DONE;
                td->nr_done_files++;

                /*
                 * probably not the right place to do this, but see
                 * if we need to open a new file
                 */
                if (td->nr_open_files < td->o.open_files &&
                    td->o.open_files != td->o.nr_files) {
                        f = find_next_new_file(td);

                        if (!f || td_io_open_file(td, f))
                                return 1;

                        goto set_file;
                }
        } while (1);

        return 0;
}
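
/*
 * File service example (illustrative): with file_service_nr=4, a newly
 * picked file sets file_service_left to 3, so get_next_file() hands out
 * the same file for the next three calls before consulting the round
 * robin or random pickers again, i.e. four consecutive io_us per file.
 */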

struct io_u *__get_io_u(struct thread_data *td)
{
        struct io_u *io_u = NULL;

        if (!list_empty(&td->io_u_requeues))
                io_u = list_entry(td->io_u_requeues.next, struct io_u, list);
        else if (!queue_full(td)) {
                io_u = list_entry(td->io_u_freelist.next, struct io_u, list);

                io_u->buflen = 0;
                io_u->resid = 0;
                io_u->file = NULL;
                io_u->end_io = NULL;
        }

        if (io_u) {
                assert(io_u->flags & IO_U_F_FREE);
                io_u->flags &= ~IO_U_F_FREE;

                io_u->error = 0;
                list_del(&io_u->list);
                list_add(&io_u->list, &td->io_u_busylist);
                td->cur_depth++;
        }

        return io_u;
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
        struct fio_file *f;
        struct io_u *io_u;

        io_u = __get_io_u(td);
        if (!io_u) {
                dprint(FD_IO, "__get_io_u failed\n");
                return NULL;
        }

        /*
         * from a requeue, io_u already setup
         */
        if (io_u->file)
                goto out;

        /*
         * If using an iolog, grab next piece if any available.
         */
        if (td->o.read_iolog_file) {
                if (read_iolog_get(td, io_u))
                        goto err_put;
        } else if (set_io_u_file(td, io_u)) {
                dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
                goto err_put;
        }

        f = io_u->file;
        assert(f->flags & FIO_FILE_OPEN);

        if (io_u->ddir != DDIR_SYNC) {
                if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
                        dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
                        goto err_put;
                }

                f->last_pos = io_u->offset + io_u->buflen;

                if (td->o.verify != VERIFY_NONE)
                        populate_verify_io_u(td, io_u);
        }

        /*
         * Set io data pointers.
         */
        io_u->endpos = io_u->offset + io_u->buflen;
        io_u->xfer_buf = io_u->buf;
        io_u->xfer_buflen = io_u->buflen;
out:
        if (!td_io_prep(td, io_u)) {
                fio_gettime(&io_u->start_time, NULL);
                return io_u;
        }
err_put:
        dprint(FD_IO, "get_io_u failed\n");
        put_io_u(td, io_u);
        return NULL;
}

void io_u_log_error(struct thread_data *td, struct io_u *io_u)
{
        const char *msg[] = { "read", "write", "sync" };

        log_err("fio: io_u error");

        if (io_u->file)
                log_err(" on file %s", io_u->file->file_name);

        log_err(": %s\n", strerror(io_u->error));

        log_err("     %s offset=%llu, buflen=%lu\n", msg[io_u->ddir],
                                        io_u->offset, io_u->xfer_buflen);

        if (!td->error)
                td_verror(td, io_u->error, "io_u error");
}

static void io_completed(struct thread_data *td, struct io_u *io_u,
                         struct io_completion_data *icd)
{
        unsigned long usec;

        dprint_io_u(io_u, "io complete");

        assert(io_u->flags & IO_U_F_FLIGHT);
        io_u->flags &= ~IO_U_F_FLIGHT;

        if (io_u->ddir == DDIR_SYNC) {
                td->last_was_sync = 1;
                return;
        }

        td->last_was_sync = 0;

        if (!io_u->error) {
                unsigned int bytes = io_u->buflen - io_u->resid;
                const enum fio_ddir idx = io_u->ddir;
                int ret;

                td->io_blocks[idx]++;
                td->io_bytes[idx] += bytes;
                td->this_io_bytes[idx] += bytes;

                usec = utime_since(&io_u->issue_time, &icd->time);

                add_clat_sample(td, idx, usec);
                add_bw_sample(td, idx, &icd->time);
                io_u_mark_latency(td, usec);

                if (td_write(td) && idx == DDIR_WRITE &&
                    td->o.do_verify &&
                    td->o.verify != VERIFY_NONE)
                        log_io_piece(td, io_u);

                icd->bytes_done[idx] += bytes;

                if (io_u->end_io) {
                        ret = io_u->end_io(td, io_u);
                        if (ret && !icd->error)
                                icd->error = ret;
                }
        } else {
                icd->error = io_u->error;
                io_u_log_error(td, io_u);
        }
}

static void init_icd(struct io_completion_data *icd, int nr)
{
        fio_gettime(&icd->time, NULL);

        icd->nr = nr;

        icd->error = 0;
        icd->bytes_done[0] = icd->bytes_done[1] = 0;
}

static void ios_completed(struct thread_data *td,
                          struct io_completion_data *icd)
{
        struct io_u *io_u;
        int i;

        for (i = 0; i < icd->nr; i++) {
                io_u = td->io_ops->event(td, i);

                io_completed(td, io_u, icd);
                put_io_u(td, io_u);
        }
}

/*
 * Complete a single io_u for the sync engines.
 */
long io_u_sync_complete(struct thread_data *td, struct io_u *io_u)
{
        struct io_completion_data icd;

        init_icd(&icd, 1);
        io_completed(td, io_u, &icd);
        put_io_u(td, io_u);

        if (!icd.error)
                return icd.bytes_done[0] + icd.bytes_done[1];

        td_verror(td, icd.error, "io_u_sync_complete");
        return -1;
}
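
/*
 * Accounting example (illustrative): if a 64 KiB io_u completes with a
 * 16 KiB residual (io_u->resid), io_completed() credits only the 48 KiB
 * that actually transferred to td->io_bytes[] and icd->bytes_done[], so
 * bandwidth and rwmix accounting reflect real data movement.
 */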

/*
 * Called to complete min_events number of io for the async engines.
 */
long io_u_queued_complete(struct thread_data *td, int min_events)
{
        struct io_completion_data icd;
        struct timespec *tvp = NULL;
        int ret;
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

        dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_events);

        if (!min_events)
                tvp = &ts;

        ret = td_io_getevents(td, min_events, td->cur_depth, tvp);
        if (ret < 0) {
                td_verror(td, -ret, "td_io_getevents");
                return ret;
        } else if (!ret)
                return ret;

        init_icd(&icd, ret);
        ios_completed(td, &icd);
        if (!icd.error)
                return icd.bytes_done[0] + icd.bytes_done[1];

        td_verror(td, icd.error, "io_u_queued_complete");
        return -1;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
        unsigned long slat_time;

        slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
        add_slat_sample(td, io_u->ddir, slat_time);
}

#ifdef FIO_USE_TIMEOUT
void io_u_set_timeout(struct thread_data *td)
{
        assert(td->cur_depth);

        td->timer.it_interval.tv_sec = 0;
        td->timer.it_interval.tv_usec = 0;
        td->timer.it_value.tv_sec = IO_U_TIMEOUT + IO_U_TIMEOUT_INC;
        td->timer.it_value.tv_usec = 0;
        setitimer(ITIMER_REAL, &td->timer, NULL);
        fio_gettime(&td->timeout_end, NULL);
}

static void io_u_dump(struct io_u *io_u)
{
        unsigned long t_start = mtime_since_now(&io_u->start_time);
        unsigned long t_issue = mtime_since_now(&io_u->issue_time);

        log_err("io_u=%p, t_start=%lu, t_issue=%lu\n", io_u, t_start, t_issue);
        log_err("  buf=%p/%p, len=%lu/%lu, offset=%llu\n", io_u->buf,
                                                io_u->xfer_buf, io_u->buflen,
                                                io_u->xfer_buflen,
                                                io_u->offset);
        log_err("  ddir=%d, fname=%s\n", io_u->ddir, io_u->file->file_name);
}
#else
void io_u_set_timeout(struct thread_data fio_unused *td)
{
}
#endif

#ifdef FIO_USE_TIMEOUT
static void io_u_timeout_handler(int fio_unused sig)
{
        struct thread_data *td, *__td;
        pid_t pid = getpid();
        struct list_head *entry;
        struct io_u *io_u;
        int i;

        log_err("fio: io_u timeout\n");

        /*
         * TLS would be nice...
         */
        td = NULL;
        for_each_td(__td, i) {
                if (__td->pid == pid) {
                        td = __td;
                        break;
                }
        }

        if (!td) {
                log_err("fio: io_u timeout, can't find job\n");
                exit(1);
        }

        if (!td->cur_depth) {
                log_err("fio: timeout without pending work?\n");
                return;
        }

        log_err("fio: io_u timeout: job=%s, pid=%d\n", td->o.name, td->pid);

        list_for_each(entry, &td->io_u_busylist) {
                io_u = list_entry(entry, struct io_u, list);

                io_u_dump(io_u);
        }

        td_verror(td, ETIMEDOUT, "io_u timeout");
        exit(1);
}
#endif

void io_u_init_timeout(void)
{
#ifdef FIO_USE_TIMEOUT
        signal(SIGALRM, io_u_timeout_handler);
#endif
}