backend.c revision 62244c9dbfa64f54a61b26af8f15a722362ac41a
/*
 * fio - the flexible io tester
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk>
 *
 * The license below covers all files distributed with fio unless otherwise
 * noted in the file itself.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <limits.h>
#include <signal.h>
#include <time.h>
#include <locale.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/mman.h>

#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif
#include "hash.h"
#include "smalloc.h"
#include "verify.h"
#include "trim.h"
#include "diskutil.h"
#include "cgroup.h"
#include "profile.h"
#include "lib/rand.h"
#include "memalign.h"
#include "server.h"
#include "lib/getrusage.h"
#include "idletime.h"
#include "err.h"
#include "lib/tp.h"

static pthread_t disk_util_thread;
static struct fio_mutex *disk_thread_mutex;
static pthread_cond_t du_cond;
static pthread_mutex_t du_lock;

static struct fio_mutex *startup_mutex;
static struct flist_head *cgroup_list;
static char *cgroup_mnt;
static int exit_value;
static volatile int fio_abort;
static unsigned int nr_process = 0;
static unsigned int nr_thread = 0;

struct io_log *agg_io_log[DDIR_RWDIR_CNT];

int groupid = 0;
unsigned int thread_number = 0;
unsigned int stat_number = 0;
int shm_id = 0;
int temp_stall_ts;
unsigned long done_secs = 0;
volatile int disk_util_exit = 0;

#define PAGE_ALIGN(buf)	\
	(char *) (((uintptr_t) (buf) + page_mask) & ~page_mask)

#define JOB_START_TIMEOUT	(5 * 1000)

static void sig_int(int sig)
{
	if (threads) {
		if (is_backend)
			fio_server_got_signal(sig);
		else {
			log_info("\nfio: terminating on signal %d\n", sig);
			log_info_flush();
			exit_value = 128;
		}

		fio_terminate_threads(TERMINATE_ALL);
	}
}

static void sig_show_status(int sig)
{
	show_running_run_stats();
}

static void set_sig_handlers(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGINT, &act, NULL);

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGTERM, &act, NULL);

/* Windows uses SIGBREAK as a quit signal from other applications */
#ifdef WIN32
	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_int;
	act.sa_flags = SA_RESTART;
	sigaction(SIGBREAK, &act, NULL);
#endif

	memset(&act, 0, sizeof(act));
	act.sa_handler = sig_show_status;
	act.sa_flags = SA_RESTART;
	sigaction(SIGUSR1, &act, NULL);

	if (is_backend) {
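		/*
		 * When fio runs as a server backend, a dropped client
		 * connection surfaces as SIGPIPE; catch it so the backend
		 * can shut down cleanly instead of being killed.
		 */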
		memset(&act, 0, sizeof(act));
		act.sa_handler = sig_int;
		act.sa_flags = SA_RESTART;
		sigaction(SIGPIPE, &act, NULL);
	}
}

/*
 * Check if we are above the minimum rate given.
 */
static int __check_min_rate(struct thread_data *td, struct timeval *now,
			    enum fio_ddir ddir)
{
	unsigned long long bytes = 0;
	unsigned long iops = 0;
	unsigned long spent;
	unsigned long rate;
	unsigned int ratemin = 0;
	unsigned int rate_iops = 0;
	unsigned int rate_iops_min = 0;

	assert(ddir_rw(ddir));

	if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir])
		return 0;

	/*
	 * allow a 2 second settle period in the beginning
	 */
	if (mtime_since(&td->start, now) < 2000)
		return 0;

	iops += td->this_io_blocks[ddir];
	bytes += td->this_io_bytes[ddir];
	ratemin += td->o.ratemin[ddir];
	rate_iops += td->o.rate_iops[ddir];
	rate_iops_min += td->o.rate_iops_min[ddir];

	/*
	 * if rate blocks is set, sample is running
	 */
	if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) {
		spent = mtime_since(&td->lastrate[ddir], now);
		if (spent < td->o.ratecycle)
			return 0;

		if (td->o.rate[ddir]) {
			/*
			 * check bandwidth specified rate
			 */
			if (bytes < td->rate_bytes[ddir]) {
				log_err("%s: min rate %u not met\n",
						td->o.name, ratemin);
				return 1;
			} else {
				if (spent)
					rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent;
				else
					rate = 0;

				if (rate < ratemin ||
				    bytes < td->rate_bytes[ddir]) {
					log_err("%s: min rate %u not met, got"
						" %luKB/sec\n", td->o.name,
							ratemin, rate);
					return 1;
				}
			}
		} else {
			/*
			 * checks iops specified rate
			 */
			if (iops < rate_iops) {
				log_err("%s: min iops rate %u not met\n",
						td->o.name, rate_iops);
				return 1;
			} else {
				if (spent)
					rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent;
				else
					rate = 0;

				if (rate < rate_iops_min ||
				    iops < td->rate_blocks[ddir]) {
					log_err("%s: min iops rate %u not met,"
						" got %lu\n", td->o.name,
							rate_iops_min, rate);
					/*
					 * Fail the job, like the bandwidth
					 * check above does. This return was
					 * missing.
					 */
					return 1;
				}
			}
		}
	}

	td->rate_bytes[ddir] = bytes;
	td->rate_blocks[ddir] = iops;
	memcpy(&td->lastrate[ddir], now, sizeof(*now));
	return 0;
}

static int check_min_rate(struct thread_data *td, struct timeval *now,
			  uint64_t *bytes_done)
{
	int ret = 0;

	if (bytes_done[DDIR_READ])
		ret |= __check_min_rate(td, now, DDIR_READ);
	if (bytes_done[DDIR_WRITE])
		ret |= __check_min_rate(td, now, DDIR_WRITE);
	if (bytes_done[DDIR_TRIM])
		ret |= __check_min_rate(td, now, DDIR_TRIM);

	return ret;
}

/*
 * When job exits, we can cancel the in-flight IO if we are using async
 * io. Attempt to do so.
 */
static void cleanup_pending_aio(struct thread_data *td)
{
	int r;

	/*
	 * get immediately available events, if any
	 */
	r = io_u_queued_complete(td, 0, NULL);
	if (r < 0)
		return;

	/*
	 * now cancel remaining active events
	 */
	if (td->io_ops->cancel) {
		struct io_u *io_u;
		int i;

		io_u_qiter(&td->io_u_all, io_u, i) {
			if (io_u->flags & IO_U_F_FLIGHT) {
				r = td->io_ops->cancel(td, io_u);
				if (!r)
					put_io_u(td, io_u);
			}
		}
	}

	if (td->cur_depth)
		r = io_u_queued_complete(td, td->cur_depth, NULL);
}

/*
 * Helper to handle the final sync of a file. Works just like the normal
 * io path, just does everything sync.
 */
static int fio_io_sync(struct thread_data *td, struct fio_file *f)
{
	struct io_u *io_u = __get_io_u(td);
	int ret;

	if (!io_u)
		return 1;

	io_u->ddir = DDIR_SYNC;
	io_u->file = f;

	if (td_io_prep(td, io_u)) {
		put_io_u(td, io_u);
		return 1;
	}

requeue:
	ret = td_io_queue(td, io_u);
	if (ret < 0) {
		td_verror(td, io_u->error, "td_io_queue");
		put_io_u(td, io_u);
		return 1;
	} else if (ret == FIO_Q_QUEUED) {
		if (io_u_queued_complete(td, 1, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_COMPLETED) {
		if (io_u->error) {
			td_verror(td, io_u->error, "td_io_queue");
			return 1;
		}

		if (io_u_sync_complete(td, io_u, NULL) < 0)
			return 1;
	} else if (ret == FIO_Q_BUSY) {
		if (td_io_commit(td))
			return 1;
		goto requeue;
	}

	return 0;
}

static int fio_file_fsync(struct thread_data *td, struct fio_file *f)
{
	int ret;

	if (fio_file_open(f))
		return fio_io_sync(td, f);

	if (td_io_open_file(td, f))
		return 1;

	ret = fio_io_sync(td, f);
	td_io_close_file(td, f);
	return ret;
}

static inline void __update_tv_cache(struct thread_data *td)
{
	fio_gettime(&td->tv_cache, NULL);
}

static inline void update_tv_cache(struct thread_data *td)
{
	if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask)
		__update_tv_cache(td);
}

static inline int runtime_exceeded(struct thread_data *td, struct timeval *t)
{
	if (in_ramp_time(td))
		return 0;
	if (!td->o.timeout)
		return 0;
	if (utime_since(&td->epoch, t) >= td->o.timeout)
		return 1;

	return 0;
}

static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir,
			       int *retptr)
{
	int ret = *retptr;

	if (ret < 0 || td->error) {
		int err = td->error;
		enum error_type_bit eb;

		if (ret < 0)
			err = -ret;

		eb = td_error_type(ddir, err);
		if (!(td->o.continue_on_error & (1 << eb)))
			return 1;

		if (td_non_fatal_error(td, eb, err)) {
			/*
			 * Continue with the I/Os in case of
			 * a non fatal error.
			 */
			update_error_count(td, err);
			td_clear_error(td);
			*retptr = 0;
			return 0;
		} else if (td->o.fill_device && err == ENOSPC) {
			/*
			 * We expect to hit this error if
			 * fill_device option is set.
			 */
			td_clear_error(td);
			fio_mark_td_terminate(td);
			return 1;
		} else {
			/*
			 * Stop the I/O in case of a fatal
			 * error.
			 */
			update_error_count(td, err);
			return 1;
		}
	}

	return 0;
}

static void check_update_rusage(struct thread_data *td)
{
	if (td->update_rusage) {
		td->update_rusage = 0;
		update_rusage_stat(td);
		fio_mutex_up(td->rusage_sem);
	}
}

/*
 * The main verify engine. Runs over the writes we previously submitted,
 * reads the blocks back in, and checks the crc/md5 of the data.
 */
static void do_verify(struct thread_data *td, uint64_t verify_bytes)
{
	uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
	struct fio_file *f;
	struct io_u *io_u;
	int ret, min_events;
	unsigned int i;

	dprint(FD_VERIFY, "starting loop\n");

	/*
	 * sync io first and invalidate cache, to make sure we really
	 * read from disk.
	 */
	for_each_file(td, f, i) {
		if (!fio_file_open(f))
			continue;
		if (fio_io_sync(td, f))
			break;
		if (file_invalidate_cache(td, f))
			break;
	}

	check_update_rusage(td);

	if (td->error)
		return;

	td_set_runstate(td, TD_VERIFYING);

	io_u = NULL;
	while (!td->terminate) {
		enum fio_ddir ddir;
		int ret2, full;

		update_tv_cache(td);
		check_update_rusage(td);

		if (runtime_exceeded(td, &td->tv_cache)) {
			__update_tv_cache(td);
			if (runtime_exceeded(td, &td->tv_cache)) {
				fio_mark_td_terminate(td);
				break;
			}
		}

		if (flow_threshold_exceeded(td))
			continue;

		if (!td->o.experimental_verify) {
			io_u = __get_io_u(td);
			if (!io_u)
				break;

			if (get_next_verify(td, io_u)) {
				put_io_u(td, io_u);
				break;
			}

			if (td_io_prep(td, io_u)) {
				put_io_u(td, io_u);
				break;
			}
		} else {
			if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes)
				break;

			while ((io_u = get_io_u(td)) != NULL) {
				if (IS_ERR(io_u)) {
					io_u = NULL;
					ret = FIO_Q_BUSY;
					goto reap;
				}

				/*
				 * We are only interested in the places where
				 * we wrote or trimmed IOs. Turn those into
				 * reads for verification purposes.
				 */
				if (io_u->ddir == DDIR_READ) {
					/*
					 * Pretend we issued it for rwmix
					 * accounting
					 */
					td->io_issues[DDIR_READ]++;
					put_io_u(td, io_u);
					continue;
				} else if (io_u->ddir == DDIR_TRIM) {
					io_u->ddir = DDIR_READ;
					io_u->flags |= IO_U_F_TRIMMED;
					break;
				} else if (io_u->ddir == DDIR_WRITE) {
					io_u->ddir = DDIR_READ;
					break;
				} else {
					put_io_u(td, io_u);
					continue;
				}
			}

			if (!io_u)
				break;
		}

		if (td->o.verify_async)
			io_u->end_io = verify_io_u_async;
		else
			io_u->end_io = verify_io_u;

		ddir = io_u->ddir;

		ret = td_io_queue(td, io_u);
		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = -io_u->error;
				clear_io_u(td, io_u);
			} else if (io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;

				/*
				 * zero read, fail
				 */
				if (!bytes) {
					td_verror(td, EIO, "full resid");
					put_io_u(td, io_u);
					break;
				}

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				io_u->offset += bytes;

				if (ddir_rw(io_u->ddir))
					td->ts.short_io_u[io_u->ddir]++;

				f = io_u->file;
				if (io_u->offset == f->real_file_size)
					goto sync_done;

				requeue_io_u(td, &io_u);
			} else {
sync_done:
				ret = io_u_sync_complete(td, io_u, bytes_done);
				if (ret < 0)
					break;
			}
			continue;
		case FIO_Q_QUEUED:
			break;
		case FIO_Q_BUSY:
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			td_verror(td, -ret, "td_io_queue");
			break;
		}

		if (break_on_this_error(td, ddir, &ret))
			break;

		/*
		 * if we can queue more, do so. but check if there are
		 * completed io_u's first. Note that we can get BUSY even
		 * without IO queued, if the system is resource starved.
		 */
reap:
		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
		if (full || !td->o.iodepth_batch_complete) {
			min_events = min(td->o.iodepth_batch_complete,
					 td->cur_depth);
			/*
			 * if the queue is full, we MUST reap at least 1 event
			 */
			if (full && !min_events)
				min_events = 1;

			do {
				/*
				 * Reap required number of io units, if any,
				 * and do the verification on them through
				 * the callback handler
				 */
				if (io_u_queued_complete(td, min_events, bytes_done) < 0) {
					ret = -1;
					break;
				}
			} while (full && (td->cur_depth > td->o.iodepth_low));
		}
		if (ret < 0)
			break;
	}

	check_update_rusage(td);

	if (!td->error) {
		min_events = td->cur_depth;

		if (min_events)
			ret = io_u_queued_complete(td, min_events, NULL);
	} else
		cleanup_pending_aio(td);

	td_set_runstate(td, TD_RUNNING);

	dprint(FD_VERIFY, "exiting loop\n");
}

static unsigned int exceeds_number_ios(struct thread_data *td)
{
	unsigned long long number_ios;

	if (!td->o.number_ios)
		return 0;

	number_ios = ddir_rw_sum(td->this_io_blocks);
	number_ios += td->io_u_queued + td->io_u_in_flight;

	return number_ios >= td->o.number_ios;
}

static int io_bytes_exceeded(struct thread_data *td)
{
	unsigned long long bytes, limit;

	if (td_rw(td))
		bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE];
	else if (td_write(td))
		bytes = td->this_io_bytes[DDIR_WRITE];
	else if (td_read(td))
		bytes = td->this_io_bytes[DDIR_READ];
	else
		bytes = td->this_io_bytes[DDIR_TRIM];

	if (td->o.io_limit)
		limit = td->o.io_limit;
	else
		limit = td->o.size;

	return bytes >= limit || exceeds_number_ios(td);
}

/*
 * Main IO worker function. It retrieves io_u's to process and queues
 * and reaps them, checking for rate and errors along the way.
 *
 * Returns number of bytes written and trimmed.
 */
static uint64_t do_io(struct thread_data *td)
{
	uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };
	unsigned int i;
	int ret = 0;
	uint64_t total_bytes, bytes_issued = 0;

	if (in_ramp_time(td))
		td_set_runstate(td, TD_RAMP);
	else
		td_set_runstate(td, TD_RUNNING);

	lat_target_init(td);

	/*
	 * If verify_backlog is enabled, we'll run the verify in this
	 * handler as well. For that case, we may need up to twice the
	 * amount of bytes.
	 */
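	/*
	 * A hypothetical example: a 1G write job with verify_backlog set
	 * also issues its verify reads from this loop, so the byte budget
	 * below is doubled to 2G to leave room for them.
	 */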
	total_bytes = td->o.size;
	if (td->o.verify != VERIFY_NONE &&
	    (td_write(td) && td->o.verify_backlog))
		total_bytes += td->o.size;

	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) ||
		td->o.time_based) {
		struct timeval comp_time;
		int min_evts = 0;
		struct io_u *io_u;
		int ret2, full;
		enum fio_ddir ddir;

		check_update_rusage(td);

		if (td->terminate || td->done)
			break;

		update_tv_cache(td);

		if (runtime_exceeded(td, &td->tv_cache)) {
			__update_tv_cache(td);
			if (runtime_exceeded(td, &td->tv_cache)) {
				fio_mark_td_terminate(td);
				break;
			}
		}

		if (flow_threshold_exceeded(td))
			continue;

		if (bytes_issued >= total_bytes)
			break;

		io_u = get_io_u(td);
		if (IS_ERR_OR_NULL(io_u)) {
			int err = PTR_ERR(io_u);

			io_u = NULL;
			if (err == -EBUSY) {
				ret = FIO_Q_BUSY;
				goto reap;
			}
			if (td->o.latency_target)
				goto reap;
			break;
		}

		ddir = io_u->ddir;

		/*
		 * Add verification end_io handler if:
		 * - Asked to verify (!td_rw(td))
		 * - Or the io_u is from our verify list (mixed write/ver)
		 */
		if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ &&
		    ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) {

			if (!td->o.verify_pattern_bytes) {
				io_u->rand_seed = __rand(&td->__verify_state);
				if (sizeof(int) != sizeof(long *))
					io_u->rand_seed *= __rand(&td->__verify_state);
			}

			if (td->o.verify_async)
				io_u->end_io = verify_io_u_async;
			else
				io_u->end_io = verify_io_u;
			td_set_runstate(td, TD_VERIFYING);
		} else if (in_ramp_time(td))
			td_set_runstate(td, TD_RAMP);
		else
			td_set_runstate(td, TD_RUNNING);

		/*
		 * Always log IO before it's issued, so we know the specific
		 * order of it. The logged unit will track when the IO has
		 * completed.
		 */
		if (td_write(td) && io_u->ddir == DDIR_WRITE &&
		    td->o.do_verify &&
		    td->o.verify != VERIFY_NONE &&
		    !td->o.experimental_verify)
			log_io_piece(td, io_u);

		ret = td_io_queue(td, io_u);
		switch (ret) {
		case FIO_Q_COMPLETED:
			if (io_u->error) {
				ret = -io_u->error;
				unlog_io_piece(td, io_u);
				clear_io_u(td, io_u);
			} else if (io_u->resid) {
				int bytes = io_u->xfer_buflen - io_u->resid;
				struct fio_file *f = io_u->file;

				bytes_issued += bytes;

				trim_io_piece(td, io_u);

				/*
				 * zero read, fail
				 */
				if (!bytes) {
					unlog_io_piece(td, io_u);
					td_verror(td, EIO, "full resid");
					put_io_u(td, io_u);
					break;
				}

				io_u->xfer_buflen = io_u->resid;
				io_u->xfer_buf += bytes;
				io_u->offset += bytes;

				if (ddir_rw(io_u->ddir))
					td->ts.short_io_u[io_u->ddir]++;

				if (io_u->offset == f->real_file_size)
					goto sync_done;

				requeue_io_u(td, &io_u);
			} else {
sync_done:
				if (__should_check_rate(td, DDIR_READ) ||
				    __should_check_rate(td, DDIR_WRITE) ||
				    __should_check_rate(td, DDIR_TRIM))
					fio_gettime(&comp_time, NULL);

				ret = io_u_sync_complete(td, io_u, bytes_done);
				if (ret < 0)
					break;
				bytes_issued += io_u->xfer_buflen;
			}
			break;
		case FIO_Q_QUEUED:
			/*
			 * if the engine doesn't have a commit hook,
			 * the io_u is really queued. if it does have such
			 * a hook, it has to call io_u_queued() itself.
			 */
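			/*
			 * Note that bytes are accounted as issued here, at
			 * queue time; completion accounting happens via
			 * bytes_done when the io_u is reaped.
			 */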
			if (td->io_ops->commit == NULL)
				io_u_queued(td, io_u);
			bytes_issued += io_u->xfer_buflen;
			break;
		case FIO_Q_BUSY:
			unlog_io_piece(td, io_u);
			requeue_io_u(td, &io_u);
			ret2 = td_io_commit(td);
			if (ret2 < 0)
				ret = ret2;
			break;
		default:
			assert(ret < 0);
			put_io_u(td, io_u);
			break;
		}

		if (break_on_this_error(td, ddir, &ret))
			break;

		/*
		 * See if we need to complete some commands. Note that we
		 * can get BUSY even without IO queued, if the system is
		 * resource starved.
		 */
reap:
		full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth);
		if (full || !td->o.iodepth_batch_complete) {
			min_evts = min(td->o.iodepth_batch_complete,
				       td->cur_depth);
			/*
			 * if the queue is full, we MUST reap at least 1 event
			 */
			if (full && !min_evts)
				min_evts = 1;

			if (__should_check_rate(td, DDIR_READ) ||
			    __should_check_rate(td, DDIR_WRITE) ||
			    __should_check_rate(td, DDIR_TRIM))
				fio_gettime(&comp_time, NULL);

			do {
				ret = io_u_queued_complete(td, min_evts, bytes_done);
				if (ret < 0)
					break;

			} while (full && (td->cur_depth > td->o.iodepth_low));
		}

		if (ret < 0)
			break;
		if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO))
			continue;

		if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) {
			if (check_min_rate(td, &comp_time, bytes_done)) {
				if (exitall_on_terminate)
					fio_terminate_threads(td->groupid);
				td_verror(td, EIO, "check_min_rate");
				break;
			}
		}
		if (!in_ramp_time(td) && td->o.latency_target)
			lat_target_check(td);

		if (td->o.thinktime) {
			unsigned long long b;

			b = ddir_rw_sum(td->io_blocks);
			if (!(b % td->o.thinktime_blocks)) {
				int left;

				io_u_quiesce(td);

				if (td->o.thinktime_spin)
					usec_spin(td->o.thinktime_spin);

				left = td->o.thinktime - td->o.thinktime_spin;
				if (left)
					usec_sleep(td, left);
			}
		}
	}

	check_update_rusage(td);

	if (td->trim_entries)
		log_err("fio: %lu trim entries leaked?\n", td->trim_entries);

	if (td->o.fill_device && td->error == ENOSPC) {
		td->error = 0;
		fio_mark_td_terminate(td);
	}
	if (!td->error) {
		struct fio_file *f;

		i = td->cur_depth;
		if (i) {
			ret = io_u_queued_complete(td, i, bytes_done);
			if (td->o.fill_device && td->error == ENOSPC)
				td->error = 0;
		}

		if (should_fsync(td) && td->o.end_fsync) {
			td_set_runstate(td, TD_FSYNCING);

			for_each_file(td, f, i) {
				if (!fio_file_fsync(td, f))
					continue;

				log_err("fio: end_fsync failed for file %s\n",
								f->file_name);
			}
		}
	} else
		cleanup_pending_aio(td);

	/*
	 * stop job if we failed doing any IO
	 */
	if (!ddir_rw_sum(td->this_io_bytes))
		td->done = 1;

	return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}

static void cleanup_io_u(struct thread_data *td)
{
	struct io_u *io_u;

	while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) {

		if (td->io_ops->io_u_free)
			td->io_ops->io_u_free(td, io_u);

		fio_memfree(io_u, sizeof(*io_u));
	}

	free_io_mem(td);

	io_u_rexit(&td->io_u_requeues);
	io_u_qexit(&td->io_u_freelist);
	io_u_qexit(&td->io_u_all);
}

static int init_io_u(struct thread_data *td)
{
	struct io_u *io_u;
	unsigned int max_bs, min_write;
	int cl_align, i, max_units;
	int data_xfer = 1, err;
	char *p;
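
	/*
	 * Illustration (hypothetical job): iodepth=16 with a 4k max block
	 * size yields a 64k orig_buffer, which the loop further down
	 * slices into one buffer per io_u.
	 */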
	max_units = td->o.iodepth;
	max_bs = td_max_bs(td);
	min_write = td->o.min_bs[DDIR_WRITE];
	td->orig_buffer_size = (unsigned long long) max_bs
					* (unsigned long long) max_units;

	if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td)))
		data_xfer = 0;

	err = 0;
	err += io_u_rinit(&td->io_u_requeues, td->o.iodepth);
	err += io_u_qinit(&td->io_u_freelist, td->o.iodepth);
	err += io_u_qinit(&td->io_u_all, td->o.iodepth);

	if (err) {
		log_err("fio: failed setting up IO queues\n");
		return 1;
	}

	/*
	 * if we may later need to do address alignment, then add any
	 * possible adjustment here so that we don't cause a buffer
	 * overflow later. this adjustment may be too much if we get
	 * lucky and the allocator gives us an aligned address.
	 */
	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
	    (td->io_ops->flags & FIO_RAWIO))
		td->orig_buffer_size += page_mask + td->o.mem_align;

	if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) {
		unsigned long bs;

		bs = td->orig_buffer_size + td->o.hugepage_size - 1;
		td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1);
	}

	if (td->orig_buffer_size != (size_t) td->orig_buffer_size) {
		log_err("fio: IO memory too large. Reduce max_bs or iodepth\n");
		return 1;
	}

	if (data_xfer && allocate_io_mem(td))
		return 1;

	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
	    (td->io_ops->flags & FIO_RAWIO))
		p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align;
	else
		p = td->orig_buffer;

	cl_align = os_cache_line_size();

	for (i = 0; i < max_units; i++) {
		void *ptr;

		if (td->terminate)
			return 1;

		ptr = fio_memalign(cl_align, sizeof(*io_u));
		if (!ptr) {
			log_err("fio: unable to allocate aligned memory\n");
			break;
		}

		io_u = ptr;
		memset(io_u, 0, sizeof(*io_u));
		INIT_FLIST_HEAD(&io_u->verify_list);
		dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i);

		if (data_xfer) {
			io_u->buf = p;
			dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf);

			if (td_write(td))
				io_u_fill_buffer(td, io_u, min_write, max_bs);
			if (td_write(td) && td->o.verify_pattern_bytes) {
				/*
				 * Fill the buffer with the pattern if we are
				 * going to be doing writes.
				 */
				fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0);
			}
		}

		io_u->index = i;
		io_u->flags = IO_U_F_FREE;
		io_u_qpush(&td->io_u_freelist, io_u);

		/*
		 * io_u never leaves this stack, used for iteration of all
		 * io_u buffers.
		 */
		io_u_qpush(&td->io_u_all, io_u);

		if (td->io_ops->io_u_init) {
			int ret = td->io_ops->io_u_init(td, io_u);

			if (ret) {
				log_err("fio: failed to init engine data: %d\n", ret);
				return 1;
			}
		}

		p += max_bs;
	}

	return 0;
}

static int switch_ioscheduler(struct thread_data *td)
{
	char tmp[256], tmp2[128];
	FILE *f;
	int ret;

	if (td->io_ops->flags & FIO_DISKLESSIO)
		return 0;

	sprintf(tmp, "%s/queue/scheduler", td->sysfs_root);

	f = fopen(tmp, "r+");
	if (!f) {
		if (errno == ENOENT) {
			log_err("fio: os or kernel doesn't support IO scheduler"
				" switching\n");
			return 0;
		}
		td_verror(td, errno, "fopen iosched");
		return 1;
	}

	/*
	 * Set io scheduler.
	 */
	ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f);
	if (ferror(f) || ret != 1) {
		td_verror(td, errno, "fwrite");
		fclose(f);
		return 1;
	}

	rewind(f);

	/*
	 * Read back and check that the selected scheduler is now the default.
	 * Zero the buffer first: a short read would otherwise leave garbage
	 * for the strstr() below. The old "ret < 0" error check was dead
	 * code, since fread() never returns a negative value.
	 */
	memset(tmp, 0, sizeof(tmp));
	ret = fread(tmp, sizeof(tmp), 1, f);
	if (ferror(f)) {
		td_verror(td, errno, "fread");
		fclose(f);
		return 1;
	}
	tmp[sizeof(tmp) - 1] = '\0';

	sprintf(tmp2, "[%s]", td->o.ioscheduler);
	if (!strstr(tmp, tmp2)) {
		log_err("fio: io scheduler %s not found\n", td->o.ioscheduler);
		td_verror(td, EINVAL, "iosched_switch");
		fclose(f);
		return 1;
	}

	fclose(f);
	return 0;
}

static int keep_running(struct thread_data *td)
{
	unsigned long long limit;

	if (td->done)
		return 0;
	if (td->o.time_based)
		return 1;
	if (td->o.loops) {
		td->o.loops--;
		return 1;
	}
	if (exceeds_number_ios(td))
		return 0;

	if (td->o.io_limit)
		limit = td->o.io_limit;
	else
		limit = td->o.size;

	if (limit != -1ULL && ddir_rw_sum(td->io_bytes) < limit) {
		uint64_t diff;

		/*
		 * If the difference is less than the minimum IO size, we
		 * are done.
		 */
		diff = limit - ddir_rw_sum(td->io_bytes);
		if (diff < td_max_bs(td))
			return 0;

		if (fio_files_done(td))
			return 0;

		return 1;
	}

	return 0;
}

static int exec_string(struct thread_options *o, const char *string, const char *mode)
{
	int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1;
	char *str;

	str = malloc(newlen);
	sprintf(str, "%s &> %s.%s.txt", string, o->name, mode);

	log_info("%s : Saving output of %s in %s.%s.txt\n", o->name, mode, o->name, mode);
	ret = system(str);
	if (ret == -1)
		log_err("fio: exec of cmd <%s> failed\n", str);

	free(str);
	return ret;
}

/*
 * Dry run to compute correct state of numberio for verification.
 */
static uint64_t do_dry_run(struct thread_data *td)
{
	uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 };

	td_set_runstate(td, TD_RUNNING);

	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
		(!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) {
		struct io_u *io_u;
		int ret;

		if (td->terminate || td->done)
			break;

		io_u = get_io_u(td);
		if (!io_u)
			break;

		io_u->flags |= IO_U_F_FLIGHT;
		io_u->error = 0;
		io_u->resid = 0;
		if (ddir_rw(acct_ddir(io_u)))
			td->io_issues[acct_ddir(io_u)]++;
		if (ddir_rw(io_u->ddir)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;
		}

		if (td_write(td) && io_u->ddir == DDIR_WRITE &&
		    td->o.do_verify &&
		    td->o.verify != VERIFY_NONE &&
		    !td->o.experimental_verify)
			log_io_piece(td, io_u);

		ret = io_u_sync_complete(td, io_u, bytes_done);
		(void) ret;
	}

	return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM];
}

/*
 * Entry point for the thread based jobs. The process based jobs end up
 * here as well, after a little setup.
 */
static void *thread_main(void *data)
{
	unsigned long long elapsed;
	struct thread_data *td = data;
	struct thread_options *o = &td->o;
	pthread_condattr_t attr;
	int clear_state;
	int ret;

	if (!o->use_thread) {
		setsid();
		td->pid = getpid();
	} else
		td->pid = gettid();

	fio_local_clock_init(o->use_thread);

	dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid);

	if (is_backend)
		fio_server_send_start(td);

	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	INIT_FLIST_HEAD(&td->next_rand_list);
	pthread_mutex_init(&td->io_u_lock, NULL);
	td->io_hist_tree = RB_ROOT;

	pthread_condattr_init(&attr);
	pthread_cond_init(&td->verify_cond, &attr);
	pthread_cond_init(&td->free_cond, &attr);

	td_set_runstate(td, TD_INITIALIZED);
	dprint(FD_MUTEX, "up startup_mutex\n");
	fio_mutex_up(startup_mutex);
	dprint(FD_MUTEX, "wait on td->mutex\n");
	fio_mutex_down(td->mutex);
	dprint(FD_MUTEX, "done waiting on td->mutex\n");

	/*
	 * A new gid requires privilege, so we need to do this before setting
	 * the uid.
	 */
	if (o->gid != -1U && setgid(o->gid)) {
		td_verror(td, errno, "setgid");
		goto err;
	}
	if (o->uid != -1U && setuid(o->uid)) {
		td_verror(td, errno, "setuid");
		goto err;
	}

	/*
	 * If we have a gettimeofday() thread, make sure we exclude that
	 * thread from this job
	 */
	if (o->gtod_cpu)
		fio_cpu_clear(&o->cpumask, o->gtod_cpu);

	/*
	 * Set affinity first, in case it has an impact on the memory
	 * allocations.
	 */
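	/*
	 * With cpus_allowed_policy=split, fio_cpus_split() below carves a
	 * single CPU out of the shared mask for this job (indexed by its
	 * thread_number), rather than letting all jobs share the mask.
	 */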
	if (o->cpumask_set) {
		if (o->cpus_allowed_policy == FIO_CPUS_SPLIT) {
			ret = fio_cpus_split(&o->cpumask, td->thread_number - 1);
			if (!ret) {
				log_err("fio: no CPUs set\n");
				log_err("fio: Try increasing number of available CPUs\n");
				td_verror(td, EINVAL, "cpus_split");
				goto err;
			}
		}
		ret = fio_setaffinity(td->pid, o->cpumask);
		if (ret == -1) {
			td_verror(td, errno, "cpu_set_affinity");
			goto err;
		}
	}

#ifdef CONFIG_LIBNUMA
	/* numa node setup */
	if (o->numa_cpumask_set || o->numa_memmask_set) {
		struct bitmask *mask;
		int ret;

		if (numa_available() < 0) {
			td_verror(td, errno, "Does not support NUMA API\n");
			goto err;
		}

		if (o->numa_cpumask_set) {
			mask = numa_parse_nodestring(o->numa_cpunodes);
			ret = numa_run_on_node_mask(mask);
			numa_free_nodemask(mask);
			if (ret == -1) {
				td_verror(td, errno,
					  "numa_run_on_node_mask failed\n");
				goto err;
			}
		}

		if (o->numa_memmask_set) {
			mask = NULL;
			if (o->numa_memnodes)
				mask = numa_parse_nodestring(o->numa_memnodes);

			switch (o->numa_mem_mode) {
			case MPOL_INTERLEAVE:
				numa_set_interleave_mask(mask);
				break;
			case MPOL_BIND:
				numa_set_membind(mask);
				break;
			case MPOL_LOCAL:
				numa_set_localalloc();
				break;
			case MPOL_PREFERRED:
				numa_set_preferred(o->numa_mem_prefer_node);
				break;
			case MPOL_DEFAULT:
			default:
				break;
			}

			if (mask)
				numa_free_nodemask(mask);
		}
	}
#endif

	if (fio_pin_memory(td))
		goto err;

	/*
	 * May alter parameters that init_io_u() will use, so we need to
	 * do this first.
	 */
	if (init_iolog(td))
		goto err;

	if (init_io_u(td))
		goto err;

	if (o->verify_async && verify_async_init(td))
		goto err;

	if (o->ioprio) {
		ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio);
		if (ret == -1) {
			td_verror(td, errno, "ioprio_set");
			goto err;
		}
	}

	if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt))
		goto err;

	errno = 0;
	if (nice(o->nice) == -1 && errno != 0) {
		td_verror(td, errno, "nice");
		goto err;
	}

	if (o->ioscheduler && switch_ioscheduler(td))
		goto err;

	if (!o->create_serialize && setup_files(td))
		goto err;

	if (td_io_init(td))
		goto err;

	if (init_random_map(td))
		goto err;

	if (o->exec_prerun && exec_string(o, o->exec_prerun, (const char *)"prerun"))
		goto err;

	if (o->pre_read) {
		if (pre_read_files(td) < 0)
			goto err;
	}

	if (td->flags & TD_F_COMPRESS_LOG)
		tp_init(&td->tp_data);

	fio_verify_init(td);

	fio_gettime(&td->epoch, NULL);
	fio_getrusage(&td->ru_start);
	clear_state = 0;
	while (keep_running(td)) {
		uint64_t verify_bytes;

		fio_gettime(&td->start, NULL);
		memcpy(&td->bw_sample_time, &td->start, sizeof(td->start));
		memcpy(&td->iops_sample_time, &td->start, sizeof(td->start));
		memcpy(&td->tv_cache, &td->start, sizeof(td->start));

		if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] ||
		    o->ratemin[DDIR_TRIM]) {
			memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time,
						sizeof(td->bw_sample_time));
			memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time,
						sizeof(td->bw_sample_time));
			memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time,
						sizeof(td->bw_sample_time));
		}

		if (clear_state)
			clear_io_state(td);

		prune_io_piece_log(td);

		if (td->o.verify_only && (td_write(td) || td_rw(td)))
			verify_bytes = do_dry_run(td);
		else
			verify_bytes = do_io(td);

		clear_state = 1;

		if (td_read(td) && td->io_bytes[DDIR_READ]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_READ] += elapsed;
		}
		if (td_write(td) && td->io_bytes[DDIR_WRITE]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_WRITE] += elapsed;
		}
		if (td_trim(td) && td->io_bytes[DDIR_TRIM]) {
			elapsed = utime_since_now(&td->start);
			td->ts.runtime[DDIR_TRIM] += elapsed;
		}

		if (td->error || td->terminate)
			break;

		if (!o->do_verify ||
		    o->verify == VERIFY_NONE ||
		    (td->io_ops->flags & FIO_UNIDIR))
			continue;

		clear_io_state(td);

		fio_gettime(&td->start, NULL);

		do_verify(td, verify_bytes);

		td->ts.runtime[DDIR_READ] += utime_since_now(&td->start);

		if (td->error || td->terminate)
			break;
	}

	update_rusage_stat(td);
	td->ts.runtime[DDIR_READ] = (td->ts.runtime[DDIR_READ] + 999) / 1000;
	td->ts.runtime[DDIR_WRITE] = (td->ts.runtime[DDIR_WRITE] + 999) / 1000;
	td->ts.runtime[DDIR_TRIM] = (td->ts.runtime[DDIR_TRIM] + 999) / 1000;
	td->ts.total_run_time = mtime_since_now(&td->epoch);
	td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ];
	td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE];
	td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM];

	fio_unpin_memory(td);

	fio_writeout_logs(td);

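	/*
	 * If log compression was enabled, tp_init() started a thread pool
	 * for it above; tear that pool down now that the logs have been
	 * written out.
	 */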
	if (td->flags & TD_F_COMPRESS_LOG)
		tp_exit(&td->tp_data);

	if (o->exec_postrun)
		exec_string(o, o->exec_postrun, (const char *)"postrun");

	if (exitall_on_terminate)
		fio_terminate_threads(td->groupid);

err:
	if (td->error)
		log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error,
							td->verror);

	if (o->verify_async)
		verify_async_exit(td);

	close_and_free_files(td);
	cleanup_io_u(td);
	close_ioengine(td);
	cgroup_shutdown(td, &cgroup_mnt);

	if (o->cpumask_set) {
		int ret = fio_cpuset_exit(&o->cpumask);

		td_verror(td, ret, "fio_cpuset_exit");
	}

	/*
	 * do this very late, it will log file closing as well
	 */
	if (o->write_iolog_file)
		write_iolog_close(td);

	fio_mutex_remove(td->rusage_sem);
	td->rusage_sem = NULL;

	fio_mutex_remove(td->mutex);
	td->mutex = NULL;

	td_set_runstate(td, TD_EXITED);
	return (void *) (uintptr_t) td->error;
}

/*
 * We cannot pass the td data into a forked process, so attach the td and
 * pass it to the thread worker.
 */
static int fork_main(int shmid, int offset)
{
	struct thread_data *td;
	void *data, *ret;

#if !defined(__hpux) && !defined(CONFIG_NO_SHM)
	data = shmat(shmid, NULL, 0);
	if (data == (void *) -1) {
		int __err = errno;

		perror("shmat");
		return __err;
	}
#else
	/*
	 * HP-UX inherits shm mappings?
	 */
	data = threads;
#endif

	td = data + offset * sizeof(struct thread_data);
	ret = thread_main(td);
	shmdt(data);
	return (int) (uintptr_t) ret;
}

static void dump_td_info(struct thread_data *td)
{
	log_err("fio: job '%s' hasn't exited in %lu seconds, it appears to "
		"be stuck. Doing forceful exit of this job.\n", td->o.name,
			(unsigned long) time_since_now(&td->terminate_time));
}

/*
 * Run over the job map and reap the threads that have exited, if any.
 */
static void reap_threads(unsigned int *nr_running, unsigned int *t_rate,
			 unsigned int *m_rate)
{
	struct thread_data *td;
	unsigned int cputhreads, realthreads, pending;
	int i, status, ret;

	/*
	 * reap exited threads (TD_EXITED -> TD_REAPED)
	 */
	realthreads = pending = cputhreads = 0;
	for_each_td(td, i) {
		int flags = 0;

		/*
		 * ->io_ops is NULL for a thread that has closed its
		 * io engine
		 */
		if (td->io_ops && !strcmp(td->io_ops->name, "cpuio"))
			cputhreads++;
		else
			realthreads++;

		if (!td->pid) {
			pending++;
			continue;
		}
		if (td->runstate == TD_REAPED)
			continue;
		if (td->o.use_thread) {
			if (td->runstate == TD_EXITED) {
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			continue;
		}

		flags = WNOHANG;
		if (td->runstate == TD_EXITED)
			flags = 0;

		/*
		 * check if someone quit or got killed in an unusual way
		 */
		ret = waitpid(td->pid, &status, flags);
		if (ret < 0) {
			if (errno == ECHILD) {
				log_err("fio: pid=%d disappeared %d\n",
						(int) td->pid, td->runstate);
				td->sig = ECHILD;
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			perror("waitpid");
		} else if (ret == td->pid) {
			if (WIFSIGNALED(status)) {
				int sig = WTERMSIG(status);

				if (sig != SIGTERM && sig != SIGUSR2)
					log_err("fio: pid=%d, got signal=%d\n",
							(int) td->pid, sig);
				td->sig = sig;
				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
			if (WIFEXITED(status)) {
				if (WEXITSTATUS(status) && !td->error)
					td->error = WEXITSTATUS(status);

				td_set_runstate(td, TD_REAPED);
				goto reaped;
			}
		}

		/*
		 * If the job is stuck, do a forceful timeout of it and
		 * move on.
		 */
		if (td->terminate &&
		    time_since_now(&td->terminate_time) >= FIO_REAP_TIMEOUT) {
			dump_td_info(td);
			td_set_runstate(td, TD_REAPED);
			goto reaped;
		}

		/*
		 * thread is not dead, continue
		 */
		pending++;
		continue;
reaped:
		(*nr_running)--;
		(*m_rate) -= ddir_rw_sum(td->o.ratemin);
		(*t_rate) -= ddir_rw_sum(td->o.rate);
		if (!td->pid)
			pending--;

		if (td->error)
			exit_value++;

		done_secs += mtime_since_now(&td->epoch) / 1000;
		profile_td_exit(td);
	}

	if (*nr_running == cputhreads && !pending && realthreads)
		fio_terminate_threads(TERMINATE_ALL);
}

static void do_usleep(unsigned int usecs)
{
	check_for_running_stats();
	usleep(usecs);
}

/*
 * Main function for kicking off and reaping jobs, as needed.
 */
static void run_threads(void)
{
	struct thread_data *td;
	unsigned int i, todo, nr_running, m_rate, t_rate, nr_started;
	uint64_t spent;

	if (fio_gtod_offload && fio_start_gtod_thread())
		return;

	fio_idle_prof_init();

	set_sig_handlers();

	nr_thread = nr_process = 0;
	for_each_td(td, i) {
		if (td->o.use_thread)
			nr_thread++;
		else
			nr_process++;
	}

	if (output_format == FIO_OUTPUT_NORMAL) {
		log_info("Starting ");
		if (nr_thread)
			log_info("%d thread%s", nr_thread,
				 nr_thread > 1 ? "s" : "");
		if (nr_process) {
			if (nr_thread)
				log_info(" and ");
			log_info("%d process%s", nr_process,
				 nr_process > 1 ? "es" : "");
"es" : ""); 1767 } 1768 log_info("\n"); 1769 log_info_flush(); 1770 } 1771 1772 todo = thread_number; 1773 nr_running = 0; 1774 nr_started = 0; 1775 m_rate = t_rate = 0; 1776 1777 for_each_td(td, i) { 1778 print_status_init(td->thread_number - 1); 1779 1780 if (!td->o.create_serialize) 1781 continue; 1782 1783 /* 1784 * do file setup here so it happens sequentially, 1785 * we don't want X number of threads getting their 1786 * client data interspersed on disk 1787 */ 1788 if (setup_files(td)) { 1789 exit_value++; 1790 if (td->error) 1791 log_err("fio: pid=%d, err=%d/%s\n", 1792 (int) td->pid, td->error, td->verror); 1793 td_set_runstate(td, TD_REAPED); 1794 todo--; 1795 } else { 1796 struct fio_file *f; 1797 unsigned int j; 1798 1799 /* 1800 * for sharing to work, each job must always open 1801 * its own files. so close them, if we opened them 1802 * for creation 1803 */ 1804 for_each_file(td, f, j) { 1805 if (fio_file_open(f)) 1806 td_io_close_file(td, f); 1807 } 1808 } 1809 } 1810 1811 /* start idle threads before io threads start to run */ 1812 fio_idle_prof_start(); 1813 1814 set_genesis_time(); 1815 1816 while (todo) { 1817 struct thread_data *map[REAL_MAX_JOBS]; 1818 struct timeval this_start; 1819 int this_jobs = 0, left; 1820 1821 /* 1822 * create threads (TD_NOT_CREATED -> TD_CREATED) 1823 */ 1824 for_each_td(td, i) { 1825 if (td->runstate != TD_NOT_CREATED) 1826 continue; 1827 1828 /* 1829 * never got a chance to start, killed by other 1830 * thread for some reason 1831 */ 1832 if (td->terminate) { 1833 todo--; 1834 continue; 1835 } 1836 1837 if (td->o.start_delay) { 1838 spent = utime_since_genesis(); 1839 1840 if (td->o.start_delay > spent) 1841 continue; 1842 } 1843 1844 if (td->o.stonewall && (nr_started || nr_running)) { 1845 dprint(FD_PROCESS, "%s: stonewall wait\n", 1846 td->o.name); 1847 break; 1848 } 1849 1850 init_disk_util(td); 1851 1852 td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED); 1853 td->update_rusage = 0; 1854 1855 /* 1856 * Set state to created. Thread will transition 1857 * to TD_INITIALIZED when it's done setting up. 1858 */ 1859 td_set_runstate(td, TD_CREATED); 1860 map[this_jobs++] = td; 1861 nr_started++; 1862 1863 if (td->o.use_thread) { 1864 int ret; 1865 1866 dprint(FD_PROCESS, "will pthread_create\n"); 1867 ret = pthread_create(&td->thread, NULL, 1868 thread_main, td); 1869 if (ret) { 1870 log_err("pthread_create: %s\n", 1871 strerror(ret)); 1872 nr_started--; 1873 break; 1874 } 1875 ret = pthread_detach(td->thread); 1876 if (ret) 1877 log_err("pthread_detach: %s", 1878 strerror(ret)); 1879 } else { 1880 pid_t pid; 1881 dprint(FD_PROCESS, "will fork\n"); 1882 pid = fork(); 1883 if (!pid) { 1884 int ret = fork_main(shm_id, i); 1885 1886 _exit(ret); 1887 } else if (i == fio_debug_jobno) 1888 *fio_debug_jobp = pid; 1889 } 1890 dprint(FD_MUTEX, "wait on startup_mutex\n"); 1891 if (fio_mutex_down_timeout(startup_mutex, 10)) { 1892 log_err("fio: job startup hung? exiting.\n"); 1893 fio_terminate_threads(TERMINATE_ALL); 1894 fio_abort = 1; 1895 nr_started--; 1896 break; 1897 } 1898 dprint(FD_MUTEX, "done waiting on startup_mutex\n"); 1899 } 1900 1901 /* 1902 * Wait for the started threads to transition to 1903 * TD_INITIALIZED. 
		fio_gettime(&this_start, NULL);
		left = this_jobs;
		while (left && !fio_abort) {
			if (mtime_since_now(&this_start) > JOB_START_TIMEOUT)
				break;

			do_usleep(100000);

			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				if (td->runstate == TD_INITIALIZED) {
					map[i] = NULL;
					left--;
				} else if (td->runstate >= TD_EXITED) {
					map[i] = NULL;
					left--;
					todo--;
					nr_running++; /* work-around... */
				}
			}
		}

		if (left) {
			log_err("fio: %d job%s failed to start\n", left,
					left > 1 ? "s" : "");
			for (i = 0; i < this_jobs; i++) {
				td = map[i];
				if (!td)
					continue;
				kill(td->pid, SIGTERM);
			}
			break;
		}

		/*
		 * start created threads (TD_INITIALIZED -> TD_RUNNING).
		 */
		for_each_td(td, i) {
			if (td->runstate != TD_INITIALIZED)
				continue;

			if (in_ramp_time(td))
				td_set_runstate(td, TD_RAMP);
			else
				td_set_runstate(td, TD_RUNNING);
			nr_running++;
			nr_started--;
			m_rate += ddir_rw_sum(td->o.ratemin);
			t_rate += ddir_rw_sum(td->o.rate);
			todo--;
			fio_mutex_up(td->mutex);
		}

		reap_threads(&nr_running, &t_rate, &m_rate);

		if (todo)
			do_usleep(100000);
	}

	while (nr_running) {
		reap_threads(&nr_running, &t_rate, &m_rate);
		do_usleep(10000);
	}

	fio_idle_prof_stop();

	update_io_ticks();
}

static void wait_for_disk_thread_exit(void)
{
	void *ret;

	disk_util_start_exit();
	pthread_cond_signal(&du_cond);
	pthread_join(disk_util_thread, &ret);
}

static void free_disk_util(void)
{
	disk_util_prune_entries();

	pthread_cond_destroy(&du_cond);
}

static void *disk_thread_main(void *data)
{
	int ret = 0;

	fio_mutex_up(startup_mutex);

	while (!ret) {
		uint64_t sec = DISK_UTIL_MSEC / 1000;
		uint64_t nsec = (DISK_UTIL_MSEC % 1000) * 1000000;
		struct timespec ts;
		struct timeval tv;

		gettimeofday(&tv, NULL);
		ts.tv_sec = tv.tv_sec + sec;
		ts.tv_nsec = (tv.tv_usec * 1000) + nsec;
		/*
		 * Normalize with >=, keeping tv_nsec in the valid
		 * [0, 1000000000) range for pthread_cond_timedwait().
		 */
		if (ts.tv_nsec >= 1000000000ULL) {
			ts.tv_nsec -= 1000000000ULL;
			ts.tv_sec++;
		}

		ret = pthread_cond_timedwait(&du_cond, &du_lock, &ts);
		if (ret != ETIMEDOUT) {
			printf("disk thread should exit %d\n", ret);
			break;
		}

		ret = update_io_ticks();

		if (!is_backend)
			print_thread_status();
	}

	return NULL;
}

static int create_disk_util_thread(void)
{
	int ret;

	setup_disk_util();

	disk_thread_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);

	pthread_cond_init(&du_cond, NULL);
	pthread_mutex_init(&du_lock, NULL);

	ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL);
	if (ret) {
		fio_mutex_remove(disk_thread_mutex);
		log_err("Can't create disk util thread: %s\n", strerror(ret));
		return 1;
	}

	dprint(FD_MUTEX, "wait on startup_mutex\n");
	fio_mutex_down(startup_mutex);
	dprint(FD_MUTEX, "done waiting on startup_mutex\n");
	return 0;
}

int fio_backend(void)
{
	struct thread_data *td;
	int i;

	if (exec_profile) {
		if (load_profile(exec_profile))
			return 1;
		free(exec_profile);
		exec_profile = NULL;
	}
	if (!thread_number)
		return 0;

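	/*
	 * These aggregate bandwidth logs sum I/O across all jobs, one log
	 * per data direction; they are flushed and freed near the end of
	 * this function.
	 */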
	if (write_bw_log) {
		struct log_params p = {
			.log_type = IO_LOG_TYPE_BW,
		};

		setup_log(&agg_io_log[DDIR_READ], &p, "agg-read_bw.log");
		setup_log(&agg_io_log[DDIR_WRITE], &p, "agg-write_bw.log");
		setup_log(&agg_io_log[DDIR_TRIM], &p, "agg-trim_bw.log");
	}

	startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED);
	if (startup_mutex == NULL)
		return 1;

	set_genesis_time();
	stat_init();
	create_disk_util_thread();

	cgroup_list = smalloc(sizeof(*cgroup_list));
	INIT_FLIST_HEAD(cgroup_list);

	run_threads();

	wait_for_disk_thread_exit();

	if (!fio_abort) {
		__show_run_stats();
		if (write_bw_log) {
			int i;

			for (i = 0; i < DDIR_RWDIR_CNT; i++) {
				struct io_log *log = agg_io_log[i];

				flush_log(log);
				free_log(log);
			}
		}
	}

	for_each_td(td, i)
		fio_options_free(td);

	free_disk_util();
	cgroup_kill(cgroup_list);
	sfree(cgroup_list);
	sfree(cgroup_mnt);

	fio_mutex_remove(startup_mutex);
	fio_mutex_remove(disk_thread_mutex);
	stat_exit();
	return exit_value;
}