backend.c revision 78a6469cfc9094763320e61c60f9aaef0ece9689
1/* 2 * fio - the flexible io tester 3 * 4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de> 5 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk> 6 * 7 * The license below covers all files distributed with fio unless otherwise 8 * noted in the file itself. 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as 12 * published by the Free Software Foundation. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; if not, write to the Free Software 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * 23 */ 24#include <unistd.h> 25#include <fcntl.h> 26#include <string.h> 27#include <limits.h> 28#include <signal.h> 29#include <time.h> 30#include <locale.h> 31#include <assert.h> 32#include <time.h> 33#include <inttypes.h> 34#include <sys/stat.h> 35#include <sys/wait.h> 36#include <sys/ipc.h> 37#include <sys/mman.h> 38 39#include "fio.h" 40#ifndef FIO_NO_HAVE_SHM_H 41#include <sys/shm.h> 42#endif 43#include "hash.h" 44#include "smalloc.h" 45#include "verify.h" 46#include "trim.h" 47#include "diskutil.h" 48#include "cgroup.h" 49#include "profile.h" 50#include "lib/rand.h" 51#include "memalign.h" 52#include "server.h" 53#include "lib/getrusage.h" 54#include "idletime.h" 55 56static pthread_t disk_util_thread; 57static struct fio_mutex *disk_thread_mutex; 58static struct fio_mutex *startup_mutex; 59static struct fio_mutex *writeout_mutex; 60static struct flist_head *cgroup_list; 61static char *cgroup_mnt; 62static int exit_value; 63static volatile int fio_abort; 64static unsigned int nr_process = 0; 65static unsigned int nr_thread = 0; 66 67struct io_log *agg_io_log[DDIR_RWDIR_CNT]; 68 69int groupid = 0; 70unsigned int thread_number = 0; 71unsigned int stat_number = 0; 72int shm_id = 0; 73int temp_stall_ts; 74unsigned long done_secs = 0; 75volatile int disk_util_exit = 0; 76 77#define PAGE_ALIGN(buf) \ 78 (char *) (((uintptr_t) (buf) + page_mask) & ~page_mask) 79 80#define JOB_START_TIMEOUT (5 * 1000) 81 82static void sig_int(int sig) 83{ 84 if (threads) { 85 if (is_backend) 86 fio_server_got_signal(sig); 87 else { 88 log_info("\nfio: terminating on signal %d\n", sig); 89 fflush(stdout); 90 exit_value = 128; 91 } 92 93 fio_terminate_threads(TERMINATE_ALL); 94 } 95} 96 97static void sig_show_status(int sig) 98{ 99 show_running_run_stats(); 100} 101 102static void set_sig_handlers(void) 103{ 104 struct sigaction act; 105 106 memset(&act, 0, sizeof(act)); 107 act.sa_handler = sig_int; 108 act.sa_flags = SA_RESTART; 109 sigaction(SIGINT, &act, NULL); 110 111 memset(&act, 0, sizeof(act)); 112 act.sa_handler = sig_int; 113 act.sa_flags = SA_RESTART; 114 sigaction(SIGTERM, &act, NULL); 115 116/* Windows uses SIGBREAK as a quit signal from other applications */ 117#ifdef WIN32 118 memset(&act, 0, sizeof(act)); 119 act.sa_handler = sig_int; 120 act.sa_flags = SA_RESTART; 121 sigaction(SIGBREAK, &act, NULL); 122#endif 123 124 memset(&act, 0, sizeof(act)); 125 act.sa_handler = sig_show_status; 126 act.sa_flags = SA_RESTART; 127 sigaction(SIGUSR1, &act, NULL); 128 129 if (is_backend) { 130 memset(&act, 0, sizeof(act)); 131 act.sa_handler = sig_int; 132 act.sa_flags = SA_RESTART; 133 sigaction(SIGPIPE, &act, NULL); 134 } 135} 136 137/* 138 * Check if we are above the minimum rate given. 139 */ 140static int __check_min_rate(struct thread_data *td, struct timeval *now, 141 enum fio_ddir ddir) 142{ 143 unsigned long long bytes = 0; 144 unsigned long iops = 0; 145 unsigned long spent; 146 unsigned long rate; 147 unsigned int ratemin = 0; 148 unsigned int rate_iops = 0; 149 unsigned int rate_iops_min = 0; 150 151 assert(ddir_rw(ddir)); 152 153 if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir]) 154 return 0; 155 156 /* 157 * allow a 2 second settle period in the beginning 158 */ 159 if (mtime_since(&td->start, now) < 2000) 160 return 0; 161 162 iops += td->this_io_blocks[ddir]; 163 bytes += td->this_io_bytes[ddir]; 164 ratemin += td->o.ratemin[ddir]; 165 rate_iops += td->o.rate_iops[ddir]; 166 rate_iops_min += td->o.rate_iops_min[ddir]; 167 168 /* 169 * if rate blocks is set, sample is running 170 */ 171 if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) { 172 spent = mtime_since(&td->lastrate[ddir], now); 173 if (spent < td->o.ratecycle) 174 return 0; 175 176 if (td->o.rate[ddir]) { 177 /* 178 * check bandwidth specified rate 179 */ 180 if (bytes < td->rate_bytes[ddir]) { 181 log_err("%s: min rate %u not met\n", td->o.name, 182 ratemin); 183 return 1; 184 } else { 185 rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent; 186 if (rate < ratemin || 187 bytes < td->rate_bytes[ddir]) { 188 log_err("%s: min rate %u not met, got" 189 " %luKB/sec\n", td->o.name, 190 ratemin, rate); 191 return 1; 192 } 193 } 194 } else { 195 /* 196 * checks iops specified rate 197 */ 198 if (iops < rate_iops) { 199 log_err("%s: min iops rate %u not met\n", 200 td->o.name, rate_iops); 201 return 1; 202 } else { 203 rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent; 204 if (rate < rate_iops_min || 205 iops < td->rate_blocks[ddir]) { 206 log_err("%s: min iops rate %u not met," 207 " got %lu\n", td->o.name, 208 rate_iops_min, rate); 209 } 210 } 211 } 212 } 213 214 td->rate_bytes[ddir] = bytes; 215 td->rate_blocks[ddir] = iops; 216 memcpy(&td->lastrate[ddir], now, sizeof(*now)); 217 return 0; 218} 219 220static int check_min_rate(struct thread_data *td, struct timeval *now, 221 uint64_t *bytes_done) 222{ 223 int ret = 0; 224 225 if (bytes_done[DDIR_READ]) 226 ret |= __check_min_rate(td, now, DDIR_READ); 227 if (bytes_done[DDIR_WRITE]) 228 ret |= __check_min_rate(td, now, DDIR_WRITE); 229 if (bytes_done[DDIR_TRIM]) 230 ret |= __check_min_rate(td, now, DDIR_TRIM); 231 232 return ret; 233} 234 235/* 236 * When job exits, we can cancel the in-flight IO if we are using async 237 * io. Attempt to do so. 238 */ 239static void cleanup_pending_aio(struct thread_data *td) 240{ 241 int r; 242 243 /* 244 * get immediately available events, if any 245 */ 246 r = io_u_queued_complete(td, 0, NULL); 247 if (r < 0) 248 return; 249 250 /* 251 * now cancel remaining active events 252 */ 253 if (td->io_ops->cancel) { 254 struct io_u *io_u; 255 int i; 256 257 io_u_qiter(&td->io_u_all, io_u, i) { 258 if (io_u->flags & IO_U_F_FLIGHT) { 259 r = td->io_ops->cancel(td, io_u); 260 if (!r) 261 put_io_u(td, io_u); 262 } 263 } 264 } 265 266 if (td->cur_depth) 267 r = io_u_queued_complete(td, td->cur_depth, NULL); 268} 269 270/* 271 * Helper to handle the final sync of a file. Works just like the normal 272 * io path, just does everything sync. 273 */ 274static int fio_io_sync(struct thread_data *td, struct fio_file *f) 275{ 276 struct io_u *io_u = __get_io_u(td); 277 int ret; 278 279 if (!io_u) 280 return 1; 281 282 io_u->ddir = DDIR_SYNC; 283 io_u->file = f; 284 285 if (td_io_prep(td, io_u)) { 286 put_io_u(td, io_u); 287 return 1; 288 } 289 290requeue: 291 ret = td_io_queue(td, io_u); 292 if (ret < 0) { 293 td_verror(td, io_u->error, "td_io_queue"); 294 put_io_u(td, io_u); 295 return 1; 296 } else if (ret == FIO_Q_QUEUED) { 297 if (io_u_queued_complete(td, 1, NULL) < 0) 298 return 1; 299 } else if (ret == FIO_Q_COMPLETED) { 300 if (io_u->error) { 301 td_verror(td, io_u->error, "td_io_queue"); 302 return 1; 303 } 304 305 if (io_u_sync_complete(td, io_u, NULL) < 0) 306 return 1; 307 } else if (ret == FIO_Q_BUSY) { 308 if (td_io_commit(td)) 309 return 1; 310 goto requeue; 311 } 312 313 return 0; 314} 315 316static int fio_file_fsync(struct thread_data *td, struct fio_file *f) 317{ 318 int ret; 319 320 if (fio_file_open(f)) 321 return fio_io_sync(td, f); 322 323 if (td_io_open_file(td, f)) 324 return 1; 325 326 ret = fio_io_sync(td, f); 327 td_io_close_file(td, f); 328 return ret; 329} 330 331static inline void __update_tv_cache(struct thread_data *td) 332{ 333 fio_gettime(&td->tv_cache, NULL); 334} 335 336static inline void update_tv_cache(struct thread_data *td) 337{ 338 if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask) 339 __update_tv_cache(td); 340} 341 342static inline int runtime_exceeded(struct thread_data *td, struct timeval *t) 343{ 344 if (in_ramp_time(td)) 345 return 0; 346 if (!td->o.timeout) 347 return 0; 348 if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000) 349 return 1; 350 351 return 0; 352} 353 354static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir, 355 int *retptr) 356{ 357 int ret = *retptr; 358 359 if (ret < 0 || td->error) { 360 int err = td->error; 361 enum error_type_bit eb; 362 363 if (ret < 0) 364 err = -ret; 365 366 eb = td_error_type(ddir, err); 367 if (!(td->o.continue_on_error & (1 << eb))) 368 return 1; 369 370 if (td_non_fatal_error(td, eb, err)) { 371 /* 372 * Continue with the I/Os in case of 373 * a non fatal error. 374 */ 375 update_error_count(td, err); 376 td_clear_error(td); 377 *retptr = 0; 378 return 0; 379 } else if (td->o.fill_device && err == ENOSPC) { 380 /* 381 * We expect to hit this error if 382 * fill_device option is set. 383 */ 384 td_clear_error(td); 385 td->terminate = 1; 386 return 1; 387 } else { 388 /* 389 * Stop the I/O in case of a fatal 390 * error. 391 */ 392 update_error_count(td, err); 393 return 1; 394 } 395 } 396 397 return 0; 398} 399 400static void check_update_rusage(struct thread_data *td) 401{ 402 if (td->update_rusage) { 403 td->update_rusage = 0; 404 update_rusage_stat(td); 405 fio_mutex_up(td->rusage_sem); 406 } 407} 408 409/* 410 * The main verify engine. Runs over the writes we previously submitted, 411 * reads the blocks back in, and checks the crc/md5 of the data. 412 */ 413static void do_verify(struct thread_data *td, uint64_t verify_bytes) 414{ 415 uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 }; 416 struct fio_file *f; 417 struct io_u *io_u; 418 int ret, min_events; 419 unsigned int i; 420 421 dprint(FD_VERIFY, "starting loop\n"); 422 423 /* 424 * sync io first and invalidate cache, to make sure we really 425 * read from disk. 426 */ 427 for_each_file(td, f, i) { 428 if (!fio_file_open(f)) 429 continue; 430 if (fio_io_sync(td, f)) 431 break; 432 if (file_invalidate_cache(td, f)) 433 break; 434 } 435 436 check_update_rusage(td); 437 438 if (td->error) 439 return; 440 441 td_set_runstate(td, TD_VERIFYING); 442 443 io_u = NULL; 444 while (!td->terminate) { 445 enum fio_ddir ddir; 446 int ret2, full; 447 448 update_tv_cache(td); 449 check_update_rusage(td); 450 451 if (runtime_exceeded(td, &td->tv_cache)) { 452 __update_tv_cache(td); 453 if (runtime_exceeded(td, &td->tv_cache)) { 454 td->terminate = 1; 455 break; 456 } 457 } 458 459 if (flow_threshold_exceeded(td)) 460 continue; 461 462 if (!td->o.experimental_verify) { 463 io_u = __get_io_u(td); 464 if (!io_u) 465 break; 466 467 if (get_next_verify(td, io_u)) { 468 put_io_u(td, io_u); 469 break; 470 } 471 472 if (td_io_prep(td, io_u)) { 473 put_io_u(td, io_u); 474 break; 475 } 476 } else { 477 if (ddir_rw_sum(bytes_done) + td->o.rw_min_bs > verify_bytes) 478 break; 479 480 while ((io_u = get_io_u(td)) != NULL) { 481 /* 482 * We are only interested in the places where 483 * we wrote or trimmed IOs. Turn those into 484 * reads for verification purposes. 485 */ 486 if (io_u->ddir == DDIR_READ) { 487 /* 488 * Pretend we issued it for rwmix 489 * accounting 490 */ 491 td->io_issues[DDIR_READ]++; 492 put_io_u(td, io_u); 493 continue; 494 } else if (io_u->ddir == DDIR_TRIM) { 495 io_u->ddir = DDIR_READ; 496 io_u->flags |= IO_U_F_TRIMMED; 497 break; 498 } else if (io_u->ddir == DDIR_WRITE) { 499 io_u->ddir = DDIR_READ; 500 break; 501 } else { 502 put_io_u(td, io_u); 503 continue; 504 } 505 } 506 507 if (!io_u) 508 break; 509 } 510 511 if (td->o.verify_async) 512 io_u->end_io = verify_io_u_async; 513 else 514 io_u->end_io = verify_io_u; 515 516 ddir = io_u->ddir; 517 518 ret = td_io_queue(td, io_u); 519 switch (ret) { 520 case FIO_Q_COMPLETED: 521 if (io_u->error) { 522 ret = -io_u->error; 523 clear_io_u(td, io_u); 524 } else if (io_u->resid) { 525 int bytes = io_u->xfer_buflen - io_u->resid; 526 527 /* 528 * zero read, fail 529 */ 530 if (!bytes) { 531 td_verror(td, EIO, "full resid"); 532 put_io_u(td, io_u); 533 break; 534 } 535 536 io_u->xfer_buflen = io_u->resid; 537 io_u->xfer_buf += bytes; 538 io_u->offset += bytes; 539 540 if (ddir_rw(io_u->ddir)) 541 td->ts.short_io_u[io_u->ddir]++; 542 543 f = io_u->file; 544 if (io_u->offset == f->real_file_size) 545 goto sync_done; 546 547 requeue_io_u(td, &io_u); 548 } else { 549sync_done: 550 ret = io_u_sync_complete(td, io_u, bytes_done); 551 if (ret < 0) 552 break; 553 } 554 continue; 555 case FIO_Q_QUEUED: 556 break; 557 case FIO_Q_BUSY: 558 requeue_io_u(td, &io_u); 559 ret2 = td_io_commit(td); 560 if (ret2 < 0) 561 ret = ret2; 562 break; 563 default: 564 assert(ret < 0); 565 td_verror(td, -ret, "td_io_queue"); 566 break; 567 } 568 569 if (break_on_this_error(td, ddir, &ret)) 570 break; 571 572 /* 573 * if we can queue more, do so. but check if there are 574 * completed io_u's first. Note that we can get BUSY even 575 * without IO queued, if the system is resource starved. 576 */ 577 full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth); 578 if (full || !td->o.iodepth_batch_complete) { 579 min_events = min(td->o.iodepth_batch_complete, 580 td->cur_depth); 581 /* 582 * if the queue is full, we MUST reap at least 1 event 583 */ 584 if (full && !min_events) 585 min_events = 1; 586 587 do { 588 /* 589 * Reap required number of io units, if any, 590 * and do the verification on them through 591 * the callback handler 592 */ 593 if (io_u_queued_complete(td, min_events, bytes_done) < 0) { 594 ret = -1; 595 break; 596 } 597 } while (full && (td->cur_depth > td->o.iodepth_low)); 598 } 599 if (ret < 0) 600 break; 601 } 602 603 check_update_rusage(td); 604 605 if (!td->error) { 606 min_events = td->cur_depth; 607 608 if (min_events) 609 ret = io_u_queued_complete(td, min_events, NULL); 610 } else 611 cleanup_pending_aio(td); 612 613 td_set_runstate(td, TD_RUNNING); 614 615 dprint(FD_VERIFY, "exiting loop\n"); 616} 617 618static int io_bytes_exceeded(struct thread_data *td) 619{ 620 unsigned long long bytes; 621 622 if (td_rw(td)) 623 bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE]; 624 else if (td_write(td)) 625 bytes = td->this_io_bytes[DDIR_WRITE]; 626 else if (td_read(td)) 627 bytes = td->this_io_bytes[DDIR_READ]; 628 else 629 bytes = td->this_io_bytes[DDIR_TRIM]; 630 631 return bytes >= td->o.size; 632} 633 634/* 635 * Main IO worker function. It retrieves io_u's to process and queues 636 * and reaps them, checking for rate and errors along the way. 637 * 638 * Returns number of bytes written and trimmed. 639 */ 640static uint64_t do_io(struct thread_data *td) 641{ 642 uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 }; 643 unsigned int i; 644 int ret = 0; 645 uint64_t total_bytes, bytes_issued = 0; 646 647 if (in_ramp_time(td)) 648 td_set_runstate(td, TD_RAMP); 649 else 650 td_set_runstate(td, TD_RUNNING); 651 652 lat_target_init(td); 653 654 /* 655 * If verify_backlog is enabled, we'll run the verify in this 656 * handler as well. For that case, we may need up to twice the 657 * amount of bytes. 658 */ 659 total_bytes = td->o.size; 660 if (td->o.verify != VERIFY_NONE && 661 (td_write(td) && td->o.verify_backlog)) 662 total_bytes += td->o.size; 663 664 while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) || 665 (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) || 666 td->o.time_based) { 667 struct timeval comp_time; 668 int min_evts = 0; 669 struct io_u *io_u; 670 int ret2, full; 671 enum fio_ddir ddir; 672 673 check_update_rusage(td); 674 675 if (td->terminate || td->done) 676 break; 677 678 update_tv_cache(td); 679 680 if (runtime_exceeded(td, &td->tv_cache)) { 681 __update_tv_cache(td); 682 if (runtime_exceeded(td, &td->tv_cache)) { 683 td->terminate = 1; 684 break; 685 } 686 } 687 688 if (flow_threshold_exceeded(td)) 689 continue; 690 691 if (bytes_issued >= total_bytes) 692 break; 693 694 io_u = get_io_u(td); 695 if (!io_u) { 696 if (td->o.latency_target) 697 goto reap; 698 break; 699 } 700 701 ddir = io_u->ddir; 702 703 /* 704 * Add verification end_io handler if: 705 * - Asked to verify (!td_rw(td)) 706 * - Or the io_u is from our verify list (mixed write/ver) 707 */ 708 if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ && 709 ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) { 710 711 if (!td->o.verify_pattern_bytes) { 712 io_u->rand_seed = __rand(&td->__verify_state); 713 if (sizeof(int) != sizeof(long *)) 714 io_u->rand_seed *= __rand(&td->__verify_state); 715 } 716 717 if (td->o.verify_async) 718 io_u->end_io = verify_io_u_async; 719 else 720 io_u->end_io = verify_io_u; 721 td_set_runstate(td, TD_VERIFYING); 722 } else if (in_ramp_time(td)) 723 td_set_runstate(td, TD_RAMP); 724 else 725 td_set_runstate(td, TD_RUNNING); 726 727 /* 728 * Verify_backlog disabled: We need to log rand seed before the 729 * actual IO to be able to replay it correctly in the verify phase. 730 */ 731 if (td_write(td) && io_u->ddir == DDIR_WRITE && 732 td->o.do_verify && 733 td->o.verify != VERIFY_NONE && 734 !td->o.experimental_verify && 735 !(td->flags & TD_F_VER_BACKLOG)) 736 log_io_piece(td, io_u); 737 738 ret = td_io_queue(td, io_u); 739 switch (ret) { 740 case FIO_Q_COMPLETED: 741 if (io_u->error) { 742 ret = -io_u->error; 743 clear_io_u(td, io_u); 744 } else if (io_u->resid) { 745 int bytes = io_u->xfer_buflen - io_u->resid; 746 struct fio_file *f = io_u->file; 747 748 bytes_issued += bytes; 749 /* 750 * zero read, fail 751 */ 752 if (!bytes) { 753 td_verror(td, EIO, "full resid"); 754 put_io_u(td, io_u); 755 break; 756 } 757 758 io_u->xfer_buflen = io_u->resid; 759 io_u->xfer_buf += bytes; 760 io_u->offset += bytes; 761 762 if (ddir_rw(io_u->ddir)) 763 td->ts.short_io_u[io_u->ddir]++; 764 765 if (io_u->offset == f->real_file_size) 766 goto sync_done; 767 768 requeue_io_u(td, &io_u); 769 } else { 770sync_done: 771 if (__should_check_rate(td, DDIR_READ) || 772 __should_check_rate(td, DDIR_WRITE) || 773 __should_check_rate(td, DDIR_TRIM)) 774 fio_gettime(&comp_time, NULL); 775 776 ret = io_u_sync_complete(td, io_u, bytes_done); 777 if (ret < 0) 778 break; 779 bytes_issued += io_u->xfer_buflen; 780 } 781 break; 782 case FIO_Q_QUEUED: 783 /* 784 * if the engine doesn't have a commit hook, 785 * the io_u is really queued. if it does have such 786 * a hook, it has to call io_u_queued() itself. 787 */ 788 if (td->io_ops->commit == NULL) 789 io_u_queued(td, io_u); 790 bytes_issued += io_u->xfer_buflen; 791 break; 792 case FIO_Q_BUSY: 793 requeue_io_u(td, &io_u); 794 ret2 = td_io_commit(td); 795 if (ret2 < 0) 796 ret = ret2; 797 break; 798 default: 799 assert(ret < 0); 800 put_io_u(td, io_u); 801 break; 802 } 803 804 if (break_on_this_error(td, ddir, &ret)) 805 break; 806 807 /* 808 * See if we need to complete some commands. Note that we 809 * can get BUSY even without IO queued, if the system is 810 * resource starved. 811 */ 812reap: 813 full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth); 814 if (full || !td->o.iodepth_batch_complete) { 815 min_evts = min(td->o.iodepth_batch_complete, 816 td->cur_depth); 817 /* 818 * if the queue is full, we MUST reap at least 1 event 819 */ 820 if (full && !min_evts) 821 min_evts = 1; 822 823 if (__should_check_rate(td, DDIR_READ) || 824 __should_check_rate(td, DDIR_WRITE) || 825 __should_check_rate(td, DDIR_TRIM)) 826 fio_gettime(&comp_time, NULL); 827 828 do { 829 ret = io_u_queued_complete(td, min_evts, bytes_done); 830 if (ret < 0) 831 break; 832 833 } while (full && (td->cur_depth > td->o.iodepth_low)); 834 } 835 836 if (ret < 0) 837 break; 838 if (!ddir_rw_sum(bytes_done) && !(td->io_ops->flags & FIO_NOIO)) 839 continue; 840 841 if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) { 842 if (check_min_rate(td, &comp_time, bytes_done)) { 843 if (exitall_on_terminate) 844 fio_terminate_threads(td->groupid); 845 td_verror(td, EIO, "check_min_rate"); 846 break; 847 } 848 } 849 if (!in_ramp_time(td) && td->o.latency_target) 850 lat_target_check(td); 851 852 if (td->o.thinktime) { 853 unsigned long long b; 854 855 b = ddir_rw_sum(td->io_blocks); 856 if (!(b % td->o.thinktime_blocks)) { 857 int left; 858 859 io_u_quiesce(td); 860 861 if (td->o.thinktime_spin) 862 usec_spin(td->o.thinktime_spin); 863 864 left = td->o.thinktime - td->o.thinktime_spin; 865 if (left) 866 usec_sleep(td, left); 867 } 868 } 869 } 870 871 check_update_rusage(td); 872 873 if (td->trim_entries) 874 log_err("fio: %lu trim entries leaked?\n", td->trim_entries); 875 876 if (td->o.fill_device && td->error == ENOSPC) { 877 td->error = 0; 878 td->terminate = 1; 879 } 880 if (!td->error) { 881 struct fio_file *f; 882 883 i = td->cur_depth; 884 if (i) { 885 ret = io_u_queued_complete(td, i, bytes_done); 886 if (td->o.fill_device && td->error == ENOSPC) 887 td->error = 0; 888 } 889 890 if (should_fsync(td) && td->o.end_fsync) { 891 td_set_runstate(td, TD_FSYNCING); 892 893 for_each_file(td, f, i) { 894 if (!fio_file_fsync(td, f)) 895 continue; 896 897 log_err("fio: end_fsync failed for file %s\n", 898 f->file_name); 899 } 900 } 901 } else 902 cleanup_pending_aio(td); 903 904 /* 905 * stop job if we failed doing any IO 906 */ 907 if (!ddir_rw_sum(td->this_io_bytes)) 908 td->done = 1; 909 910 return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM]; 911} 912 913static void cleanup_io_u(struct thread_data *td) 914{ 915 struct io_u *io_u; 916 917 while ((io_u = io_u_qpop(&td->io_u_freelist)) != NULL) { 918 919 if (td->io_ops->io_u_free) 920 td->io_ops->io_u_free(td, io_u); 921 922 fio_memfree(io_u, sizeof(*io_u)); 923 } 924 925 free_io_mem(td); 926 927 io_u_rexit(&td->io_u_requeues); 928 io_u_qexit(&td->io_u_freelist); 929 io_u_qexit(&td->io_u_all); 930} 931 932static int init_io_u(struct thread_data *td) 933{ 934 struct io_u *io_u; 935 unsigned int max_bs, min_write; 936 int cl_align, i, max_units; 937 int data_xfer = 1, err; 938 char *p; 939 940 max_units = td->o.iodepth; 941 max_bs = td_max_bs(td); 942 min_write = td->o.min_bs[DDIR_WRITE]; 943 td->orig_buffer_size = (unsigned long long) max_bs 944 * (unsigned long long) max_units; 945 946 if ((td->io_ops->flags & FIO_NOIO) || !(td_read(td) || td_write(td))) 947 data_xfer = 0; 948 949 err = 0; 950 err += io_u_rinit(&td->io_u_requeues, td->o.iodepth); 951 err += io_u_qinit(&td->io_u_freelist, td->o.iodepth); 952 err += io_u_qinit(&td->io_u_all, td->o.iodepth); 953 954 if (err) { 955 log_err("fio: failed setting up IO queues\n"); 956 return 1; 957 } 958 959 /* 960 * if we may later need to do address alignment, then add any 961 * possible adjustment here so that we don't cause a buffer 962 * overflow later. this adjustment may be too much if we get 963 * lucky and the allocator gives us an aligned address. 964 */ 965 if (td->o.odirect || td->o.mem_align || td->o.oatomic || 966 (td->io_ops->flags & FIO_RAWIO)) 967 td->orig_buffer_size += page_mask + td->o.mem_align; 968 969 if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) { 970 unsigned long bs; 971 972 bs = td->orig_buffer_size + td->o.hugepage_size - 1; 973 td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1); 974 } 975 976 if (td->orig_buffer_size != (size_t) td->orig_buffer_size) { 977 log_err("fio: IO memory too large. Reduce max_bs or iodepth\n"); 978 return 1; 979 } 980 981 if (data_xfer && allocate_io_mem(td)) 982 return 1; 983 984 if (td->o.odirect || td->o.mem_align || td->o.oatomic || 985 (td->io_ops->flags & FIO_RAWIO)) 986 p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align; 987 else 988 p = td->orig_buffer; 989 990 cl_align = os_cache_line_size(); 991 992 for (i = 0; i < max_units; i++) { 993 void *ptr; 994 995 if (td->terminate) 996 return 1; 997 998 ptr = fio_memalign(cl_align, sizeof(*io_u)); 999 if (!ptr) { 1000 log_err("fio: unable to allocate aligned memory\n"); 1001 break; 1002 } 1003 1004 io_u = ptr; 1005 memset(io_u, 0, sizeof(*io_u)); 1006 INIT_FLIST_HEAD(&io_u->verify_list); 1007 dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i); 1008 1009 if (data_xfer) { 1010 io_u->buf = p; 1011 dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf); 1012 1013 if (td_write(td)) 1014 io_u_fill_buffer(td, io_u, min_write, max_bs); 1015 if (td_write(td) && td->o.verify_pattern_bytes) { 1016 /* 1017 * Fill the buffer with the pattern if we are 1018 * going to be doing writes. 1019 */ 1020 fill_verify_pattern(td, io_u->buf, max_bs, io_u, 0, 0); 1021 } 1022 } 1023 1024 io_u->index = i; 1025 io_u->flags = IO_U_F_FREE; 1026 io_u_qpush(&td->io_u_freelist, io_u); 1027 1028 /* 1029 * io_u never leaves this stack, used for iteration of all 1030 * io_u buffers. 1031 */ 1032 io_u_qpush(&td->io_u_all, io_u); 1033 1034 if (td->io_ops->io_u_init) { 1035 int ret = td->io_ops->io_u_init(td, io_u); 1036 1037 if (ret) { 1038 log_err("fio: failed to init engine data: %d\n", ret); 1039 return 1; 1040 } 1041 } 1042 1043 p += max_bs; 1044 } 1045 1046 return 0; 1047} 1048 1049static int switch_ioscheduler(struct thread_data *td) 1050{ 1051 char tmp[256], tmp2[128]; 1052 FILE *f; 1053 int ret; 1054 1055 if (td->io_ops->flags & FIO_DISKLESSIO) 1056 return 0; 1057 1058 sprintf(tmp, "%s/queue/scheduler", td->sysfs_root); 1059 1060 f = fopen(tmp, "r+"); 1061 if (!f) { 1062 if (errno == ENOENT) { 1063 log_err("fio: os or kernel doesn't support IO scheduler" 1064 " switching\n"); 1065 return 0; 1066 } 1067 td_verror(td, errno, "fopen iosched"); 1068 return 1; 1069 } 1070 1071 /* 1072 * Set io scheduler. 1073 */ 1074 ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f); 1075 if (ferror(f) || ret != 1) { 1076 td_verror(td, errno, "fwrite"); 1077 fclose(f); 1078 return 1; 1079 } 1080 1081 rewind(f); 1082 1083 /* 1084 * Read back and check that the selected scheduler is now the default. 1085 */ 1086 ret = fread(tmp, 1, sizeof(tmp), f); 1087 if (ferror(f) || ret < 0) { 1088 td_verror(td, errno, "fread"); 1089 fclose(f); 1090 return 1; 1091 } 1092 1093 sprintf(tmp2, "[%s]", td->o.ioscheduler); 1094 if (!strstr(tmp, tmp2)) { 1095 log_err("fio: io scheduler %s not found\n", td->o.ioscheduler); 1096 td_verror(td, EINVAL, "iosched_switch"); 1097 fclose(f); 1098 return 1; 1099 } 1100 1101 fclose(f); 1102 return 0; 1103} 1104 1105static int keep_running(struct thread_data *td) 1106{ 1107 if (td->done) 1108 return 0; 1109 if (td->o.time_based) 1110 return 1; 1111 if (td->o.loops) { 1112 td->o.loops--; 1113 return 1; 1114 } 1115 1116 if (td->o.size != -1ULL && ddir_rw_sum(td->io_bytes) < td->o.size) { 1117 uint64_t diff; 1118 1119 /* 1120 * If the difference is less than the minimum IO size, we 1121 * are done. 1122 */ 1123 diff = td->o.size - ddir_rw_sum(td->io_bytes); 1124 if (diff < td_max_bs(td)) 1125 return 0; 1126 1127 return 1; 1128 } 1129 1130 return 0; 1131} 1132 1133static int exec_string(struct thread_options *o, const char *string, const char *mode) 1134{ 1135 int ret, newlen = strlen(string) + strlen(o->name) + strlen(mode) + 9 + 1; 1136 char *str; 1137 1138 str = malloc(newlen); 1139 sprintf(str, "%s &> %s.%s.txt", string, o->name, mode); 1140 1141 log_info("%s : Saving output of %s in %s.%s.txt\n",o->name, mode, o->name, mode); 1142 ret = system(str); 1143 if (ret == -1) 1144 log_err("fio: exec of cmd <%s> failed\n", str); 1145 1146 free(str); 1147 return ret; 1148} 1149 1150/* 1151 * Dry run to compute correct state of numberio for verification. 1152 */ 1153static uint64_t do_dry_run(struct thread_data *td) 1154{ 1155 uint64_t bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 }; 1156 1157 td_set_runstate(td, TD_RUNNING); 1158 1159 while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) || 1160 (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td)) { 1161 struct io_u *io_u; 1162 int ret; 1163 1164 if (td->terminate || td->done) 1165 break; 1166 1167 io_u = get_io_u(td); 1168 if (!io_u) 1169 break; 1170 1171 io_u->flags |= IO_U_F_FLIGHT; 1172 io_u->error = 0; 1173 io_u->resid = 0; 1174 if (ddir_rw(acct_ddir(io_u))) 1175 td->io_issues[acct_ddir(io_u)]++; 1176 if (ddir_rw(io_u->ddir)) { 1177 io_u_mark_depth(td, 1); 1178 td->ts.total_io_u[io_u->ddir]++; 1179 } 1180 1181 ret = io_u_sync_complete(td, io_u, bytes_done); 1182 (void) ret; 1183 } 1184 1185 return bytes_done[DDIR_WRITE] + bytes_done[DDIR_TRIM]; 1186} 1187 1188/* 1189 * Entry point for the thread based jobs. The process based jobs end up 1190 * here as well, after a little setup. 1191 */ 1192static void *thread_main(void *data) 1193{ 1194 unsigned long long elapsed; 1195 struct thread_data *td = data; 1196 struct thread_options *o = &td->o; 1197 pthread_condattr_t attr; 1198 int clear_state; 1199 int ret; 1200 1201 if (!o->use_thread) { 1202 setsid(); 1203 td->pid = getpid(); 1204 } else 1205 td->pid = gettid(); 1206 1207 /* 1208 * fio_time_init() may not have been called yet if running as a server 1209 */ 1210 fio_time_init(); 1211 1212 fio_local_clock_init(o->use_thread); 1213 1214 dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid); 1215 1216 if (is_backend) 1217 fio_server_send_start(td); 1218 1219 INIT_FLIST_HEAD(&td->io_log_list); 1220 INIT_FLIST_HEAD(&td->io_hist_list); 1221 INIT_FLIST_HEAD(&td->verify_list); 1222 INIT_FLIST_HEAD(&td->trim_list); 1223 INIT_FLIST_HEAD(&td->next_rand_list); 1224 pthread_mutex_init(&td->io_u_lock, NULL); 1225 td->io_hist_tree = RB_ROOT; 1226 1227 pthread_condattr_init(&attr); 1228 pthread_cond_init(&td->verify_cond, &attr); 1229 pthread_cond_init(&td->free_cond, &attr); 1230 1231 td_set_runstate(td, TD_INITIALIZED); 1232 dprint(FD_MUTEX, "up startup_mutex\n"); 1233 fio_mutex_up(startup_mutex); 1234 dprint(FD_MUTEX, "wait on td->mutex\n"); 1235 fio_mutex_down(td->mutex); 1236 dprint(FD_MUTEX, "done waiting on td->mutex\n"); 1237 1238 /* 1239 * the ->mutex mutex is now no longer used, close it to avoid 1240 * eating a file descriptor 1241 */ 1242 fio_mutex_remove(td->mutex); 1243 td->mutex = NULL; 1244 1245 /* 1246 * A new gid requires privilege, so we need to do this before setting 1247 * the uid. 1248 */ 1249 if (o->gid != -1U && setgid(o->gid)) { 1250 td_verror(td, errno, "setgid"); 1251 goto err; 1252 } 1253 if (o->uid != -1U && setuid(o->uid)) { 1254 td_verror(td, errno, "setuid"); 1255 goto err; 1256 } 1257 1258 /* 1259 * If we have a gettimeofday() thread, make sure we exclude that 1260 * thread from this job 1261 */ 1262 if (o->gtod_cpu) 1263 fio_cpu_clear(&o->cpumask, o->gtod_cpu); 1264 1265 /* 1266 * Set affinity first, in case it has an impact on the memory 1267 * allocations. 1268 */ 1269 if (o->cpumask_set) { 1270 ret = fio_setaffinity(td->pid, o->cpumask); 1271 if (ret == -1) { 1272 td_verror(td, errno, "cpu_set_affinity"); 1273 goto err; 1274 } 1275 } 1276 1277#ifdef CONFIG_LIBNUMA 1278 /* numa node setup */ 1279 if (o->numa_cpumask_set || o->numa_memmask_set) { 1280 int ret; 1281 1282 if (numa_available() < 0) { 1283 td_verror(td, errno, "Does not support NUMA API\n"); 1284 goto err; 1285 } 1286 1287 if (o->numa_cpumask_set) { 1288 ret = numa_run_on_node_mask(o->numa_cpunodesmask); 1289 if (ret == -1) { 1290 td_verror(td, errno, \ 1291 "numa_run_on_node_mask failed\n"); 1292 goto err; 1293 } 1294 } 1295 1296 if (o->numa_memmask_set) { 1297 1298 switch (o->numa_mem_mode) { 1299 case MPOL_INTERLEAVE: 1300 numa_set_interleave_mask(o->numa_memnodesmask); 1301 break; 1302 case MPOL_BIND: 1303 numa_set_membind(o->numa_memnodesmask); 1304 break; 1305 case MPOL_LOCAL: 1306 numa_set_localalloc(); 1307 break; 1308 case MPOL_PREFERRED: 1309 numa_set_preferred(o->numa_mem_prefer_node); 1310 break; 1311 case MPOL_DEFAULT: 1312 default: 1313 break; 1314 } 1315 1316 } 1317 } 1318#endif 1319 1320 if (fio_pin_memory(td)) 1321 goto err; 1322 1323 /* 1324 * May alter parameters that init_io_u() will use, so we need to 1325 * do this first. 1326 */ 1327 if (init_iolog(td)) 1328 goto err; 1329 1330 if (init_io_u(td)) 1331 goto err; 1332 1333 if (o->verify_async && verify_async_init(td)) 1334 goto err; 1335 1336 if (o->ioprio) { 1337 ret = ioprio_set(IOPRIO_WHO_PROCESS, 0, o->ioprio_class, o->ioprio); 1338 if (ret == -1) { 1339 td_verror(td, errno, "ioprio_set"); 1340 goto err; 1341 } 1342 } 1343 1344 if (o->cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt)) 1345 goto err; 1346 1347 errno = 0; 1348 if (nice(o->nice) == -1 && errno != 0) { 1349 td_verror(td, errno, "nice"); 1350 goto err; 1351 } 1352 1353 if (o->ioscheduler && switch_ioscheduler(td)) 1354 goto err; 1355 1356 if (!o->create_serialize && setup_files(td)) 1357 goto err; 1358 1359 if (td_io_init(td)) 1360 goto err; 1361 1362 if (init_random_map(td)) 1363 goto err; 1364 1365 if (o->exec_prerun && exec_string(o, o->exec_prerun, (const char *)"prerun")) 1366 goto err; 1367 1368 if (o->pre_read) { 1369 if (pre_read_files(td) < 0) 1370 goto err; 1371 } 1372 1373 fio_verify_init(td); 1374 1375 fio_gettime(&td->epoch, NULL); 1376 fio_getrusage(&td->ru_start); 1377 clear_state = 0; 1378 while (keep_running(td)) { 1379 uint64_t verify_bytes; 1380 1381 fio_gettime(&td->start, NULL); 1382 memcpy(&td->bw_sample_time, &td->start, sizeof(td->start)); 1383 memcpy(&td->iops_sample_time, &td->start, sizeof(td->start)); 1384 memcpy(&td->tv_cache, &td->start, sizeof(td->start)); 1385 1386 if (o->ratemin[DDIR_READ] || o->ratemin[DDIR_WRITE] || 1387 o->ratemin[DDIR_TRIM]) { 1388 memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time, 1389 sizeof(td->bw_sample_time)); 1390 memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time, 1391 sizeof(td->bw_sample_time)); 1392 memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time, 1393 sizeof(td->bw_sample_time)); 1394 } 1395 1396 if (clear_state) 1397 clear_io_state(td); 1398 1399 prune_io_piece_log(td); 1400 1401 if (td->o.verify_only && (td_write(td) || td_rw(td))) 1402 verify_bytes = do_dry_run(td); 1403 else 1404 verify_bytes = do_io(td); 1405 1406 clear_state = 1; 1407 1408 if (td_read(td) && td->io_bytes[DDIR_READ]) { 1409 elapsed = utime_since_now(&td->start); 1410 td->ts.runtime[DDIR_READ] += elapsed; 1411 } 1412 if (td_write(td) && td->io_bytes[DDIR_WRITE]) { 1413 elapsed = utime_since_now(&td->start); 1414 td->ts.runtime[DDIR_WRITE] += elapsed; 1415 } 1416 if (td_trim(td) && td->io_bytes[DDIR_TRIM]) { 1417 elapsed = utime_since_now(&td->start); 1418 td->ts.runtime[DDIR_TRIM] += elapsed; 1419 } 1420 1421 if (td->error || td->terminate) 1422 break; 1423 1424 if (!o->do_verify || 1425 o->verify == VERIFY_NONE || 1426 (td->io_ops->flags & FIO_UNIDIR)) 1427 continue; 1428 1429 clear_io_state(td); 1430 1431 fio_gettime(&td->start, NULL); 1432 1433 do_verify(td, verify_bytes); 1434 1435 td->ts.runtime[DDIR_READ] += utime_since_now(&td->start); 1436 1437 if (td->error || td->terminate) 1438 break; 1439 } 1440 1441 update_rusage_stat(td); 1442 td->ts.runtime[DDIR_READ] = (td->ts.runtime[DDIR_READ] + 999) / 1000; 1443 td->ts.runtime[DDIR_WRITE] = (td->ts.runtime[DDIR_WRITE] + 999) / 1000; 1444 td->ts.runtime[DDIR_TRIM] = (td->ts.runtime[DDIR_TRIM] + 999) / 1000; 1445 td->ts.total_run_time = mtime_since_now(&td->epoch); 1446 td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ]; 1447 td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE]; 1448 td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM]; 1449 1450 fio_unpin_memory(td); 1451 1452 fio_mutex_down(writeout_mutex); 1453 if (td->bw_log) { 1454 if (o->bw_log_file) { 1455 finish_log_named(td, td->bw_log, 1456 o->bw_log_file, "bw"); 1457 } else 1458 finish_log(td, td->bw_log, "bw"); 1459 } 1460 if (td->lat_log) { 1461 if (o->lat_log_file) { 1462 finish_log_named(td, td->lat_log, 1463 o->lat_log_file, "lat"); 1464 } else 1465 finish_log(td, td->lat_log, "lat"); 1466 } 1467 if (td->slat_log) { 1468 if (o->lat_log_file) { 1469 finish_log_named(td, td->slat_log, 1470 o->lat_log_file, "slat"); 1471 } else 1472 finish_log(td, td->slat_log, "slat"); 1473 } 1474 if (td->clat_log) { 1475 if (o->lat_log_file) { 1476 finish_log_named(td, td->clat_log, 1477 o->lat_log_file, "clat"); 1478 } else 1479 finish_log(td, td->clat_log, "clat"); 1480 } 1481 if (td->iops_log) { 1482 if (o->iops_log_file) { 1483 finish_log_named(td, td->iops_log, 1484 o->iops_log_file, "iops"); 1485 } else 1486 finish_log(td, td->iops_log, "iops"); 1487 } 1488 1489 fio_mutex_up(writeout_mutex); 1490 if (o->exec_postrun) 1491 exec_string(o, o->exec_postrun, (const char *)"postrun"); 1492 1493 if (exitall_on_terminate) 1494 fio_terminate_threads(td->groupid); 1495 1496err: 1497 if (td->error) 1498 log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error, 1499 td->verror); 1500 1501 if (o->verify_async) 1502 verify_async_exit(td); 1503 1504 close_and_free_files(td); 1505 cleanup_io_u(td); 1506 close_ioengine(td); 1507 cgroup_shutdown(td, &cgroup_mnt); 1508 1509 if (o->cpumask_set) { 1510 int ret = fio_cpuset_exit(&o->cpumask); 1511 1512 td_verror(td, ret, "fio_cpuset_exit"); 1513 } 1514 1515 /* 1516 * do this very late, it will log file closing as well 1517 */ 1518 if (o->write_iolog_file) 1519 write_iolog_close(td); 1520 1521 fio_mutex_remove(td->rusage_sem); 1522 td->rusage_sem = NULL; 1523 1524 td_set_runstate(td, TD_EXITED); 1525 return (void *) (uintptr_t) td->error; 1526} 1527 1528 1529/* 1530 * We cannot pass the td data into a forked process, so attach the td and 1531 * pass it to the thread worker. 1532 */ 1533static int fork_main(int shmid, int offset) 1534{ 1535 struct thread_data *td; 1536 void *data, *ret; 1537 1538#ifndef __hpux 1539 data = shmat(shmid, NULL, 0); 1540 if (data == (void *) -1) { 1541 int __err = errno; 1542 1543 perror("shmat"); 1544 return __err; 1545 } 1546#else 1547 /* 1548 * HP-UX inherits shm mappings? 1549 */ 1550 data = threads; 1551#endif 1552 1553 td = data + offset * sizeof(struct thread_data); 1554 ret = thread_main(td); 1555 shmdt(data); 1556 return (int) (uintptr_t) ret; 1557} 1558 1559/* 1560 * Run over the job map and reap the threads that have exited, if any. 1561 */ 1562static void reap_threads(unsigned int *nr_running, unsigned int *t_rate, 1563 unsigned int *m_rate) 1564{ 1565 struct thread_data *td; 1566 unsigned int cputhreads, realthreads, pending; 1567 int i, status, ret; 1568 1569 /* 1570 * reap exited threads (TD_EXITED -> TD_REAPED) 1571 */ 1572 realthreads = pending = cputhreads = 0; 1573 for_each_td(td, i) { 1574 int flags = 0; 1575 1576 /* 1577 * ->io_ops is NULL for a thread that has closed its 1578 * io engine 1579 */ 1580 if (td->io_ops && !strcmp(td->io_ops->name, "cpuio")) 1581 cputhreads++; 1582 else 1583 realthreads++; 1584 1585 if (!td->pid) { 1586 pending++; 1587 continue; 1588 } 1589 if (td->runstate == TD_REAPED) 1590 continue; 1591 if (td->o.use_thread) { 1592 if (td->runstate == TD_EXITED) { 1593 td_set_runstate(td, TD_REAPED); 1594 goto reaped; 1595 } 1596 continue; 1597 } 1598 1599 flags = WNOHANG; 1600 if (td->runstate == TD_EXITED) 1601 flags = 0; 1602 1603 /* 1604 * check if someone quit or got killed in an unusual way 1605 */ 1606 ret = waitpid(td->pid, &status, flags); 1607 if (ret < 0) { 1608 if (errno == ECHILD) { 1609 log_err("fio: pid=%d disappeared %d\n", 1610 (int) td->pid, td->runstate); 1611 td->sig = ECHILD; 1612 td_set_runstate(td, TD_REAPED); 1613 goto reaped; 1614 } 1615 perror("waitpid"); 1616 } else if (ret == td->pid) { 1617 if (WIFSIGNALED(status)) { 1618 int sig = WTERMSIG(status); 1619 1620 if (sig != SIGTERM && sig != SIGUSR2) 1621 log_err("fio: pid=%d, got signal=%d\n", 1622 (int) td->pid, sig); 1623 td->sig = sig; 1624 td_set_runstate(td, TD_REAPED); 1625 goto reaped; 1626 } 1627 if (WIFEXITED(status)) { 1628 if (WEXITSTATUS(status) && !td->error) 1629 td->error = WEXITSTATUS(status); 1630 1631 td_set_runstate(td, TD_REAPED); 1632 goto reaped; 1633 } 1634 } 1635 1636 /* 1637 * thread is not dead, continue 1638 */ 1639 pending++; 1640 continue; 1641reaped: 1642 (*nr_running)--; 1643 (*m_rate) -= ddir_rw_sum(td->o.ratemin); 1644 (*t_rate) -= ddir_rw_sum(td->o.rate); 1645 if (!td->pid) 1646 pending--; 1647 1648 if (td->error) 1649 exit_value++; 1650 1651 done_secs += mtime_since_now(&td->epoch) / 1000; 1652 profile_td_exit(td); 1653 } 1654 1655 if (*nr_running == cputhreads && !pending && realthreads) 1656 fio_terminate_threads(TERMINATE_ALL); 1657} 1658 1659static void do_usleep(unsigned int usecs) 1660{ 1661 check_for_running_stats(); 1662 usleep(usecs); 1663} 1664 1665/* 1666 * Main function for kicking off and reaping jobs, as needed. 1667 */ 1668static void run_threads(void) 1669{ 1670 struct thread_data *td; 1671 unsigned long spent; 1672 unsigned int i, todo, nr_running, m_rate, t_rate, nr_started; 1673 1674 if (fio_gtod_offload && fio_start_gtod_thread()) 1675 return; 1676 1677 fio_idle_prof_init(); 1678 1679 set_sig_handlers(); 1680 1681 nr_thread = nr_process = 0; 1682 for_each_td(td, i) { 1683 if (td->o.use_thread) 1684 nr_thread++; 1685 else 1686 nr_process++; 1687 } 1688 1689 if (output_format == FIO_OUTPUT_NORMAL) { 1690 log_info("Starting "); 1691 if (nr_thread) 1692 log_info("%d thread%s", nr_thread, 1693 nr_thread > 1 ? "s" : ""); 1694 if (nr_process) { 1695 if (nr_thread) 1696 log_info(" and "); 1697 log_info("%d process%s", nr_process, 1698 nr_process > 1 ? "es" : ""); 1699 } 1700 log_info("\n"); 1701 fflush(stdout); 1702 } 1703 1704 todo = thread_number; 1705 nr_running = 0; 1706 nr_started = 0; 1707 m_rate = t_rate = 0; 1708 1709 for_each_td(td, i) { 1710 print_status_init(td->thread_number - 1); 1711 1712 if (!td->o.create_serialize) 1713 continue; 1714 1715 /* 1716 * do file setup here so it happens sequentially, 1717 * we don't want X number of threads getting their 1718 * client data interspersed on disk 1719 */ 1720 if (setup_files(td)) { 1721 exit_value++; 1722 if (td->error) 1723 log_err("fio: pid=%d, err=%d/%s\n", 1724 (int) td->pid, td->error, td->verror); 1725 td_set_runstate(td, TD_REAPED); 1726 todo--; 1727 } else { 1728 struct fio_file *f; 1729 unsigned int j; 1730 1731 /* 1732 * for sharing to work, each job must always open 1733 * its own files. so close them, if we opened them 1734 * for creation 1735 */ 1736 for_each_file(td, f, j) { 1737 if (fio_file_open(f)) 1738 td_io_close_file(td, f); 1739 } 1740 } 1741 } 1742 1743 /* start idle threads before io threads start to run */ 1744 fio_idle_prof_start(); 1745 1746 set_genesis_time(); 1747 1748 while (todo) { 1749 struct thread_data *map[REAL_MAX_JOBS]; 1750 struct timeval this_start; 1751 int this_jobs = 0, left; 1752 1753 /* 1754 * create threads (TD_NOT_CREATED -> TD_CREATED) 1755 */ 1756 for_each_td(td, i) { 1757 if (td->runstate != TD_NOT_CREATED) 1758 continue; 1759 1760 /* 1761 * never got a chance to start, killed by other 1762 * thread for some reason 1763 */ 1764 if (td->terminate) { 1765 todo--; 1766 continue; 1767 } 1768 1769 if (td->o.start_delay) { 1770 spent = mtime_since_genesis(); 1771 1772 if (td->o.start_delay * 1000 > spent) 1773 continue; 1774 } 1775 1776 if (td->o.stonewall && (nr_started || nr_running)) { 1777 dprint(FD_PROCESS, "%s: stonewall wait\n", 1778 td->o.name); 1779 break; 1780 } 1781 1782 init_disk_util(td); 1783 1784 td->rusage_sem = fio_mutex_init(FIO_MUTEX_LOCKED); 1785 td->update_rusage = 0; 1786 1787 /* 1788 * Set state to created. Thread will transition 1789 * to TD_INITIALIZED when it's done setting up. 1790 */ 1791 td_set_runstate(td, TD_CREATED); 1792 map[this_jobs++] = td; 1793 nr_started++; 1794 1795 if (td->o.use_thread) { 1796 int ret; 1797 1798 dprint(FD_PROCESS, "will pthread_create\n"); 1799 ret = pthread_create(&td->thread, NULL, 1800 thread_main, td); 1801 if (ret) { 1802 log_err("pthread_create: %s\n", 1803 strerror(ret)); 1804 nr_started--; 1805 break; 1806 } 1807 ret = pthread_detach(td->thread); 1808 if (ret) 1809 log_err("pthread_detach: %s", 1810 strerror(ret)); 1811 } else { 1812 pid_t pid; 1813 dprint(FD_PROCESS, "will fork\n"); 1814 pid = fork(); 1815 if (!pid) { 1816 int ret = fork_main(shm_id, i); 1817 1818 _exit(ret); 1819 } else if (i == fio_debug_jobno) 1820 *fio_debug_jobp = pid; 1821 } 1822 dprint(FD_MUTEX, "wait on startup_mutex\n"); 1823 if (fio_mutex_down_timeout(startup_mutex, 10)) { 1824 log_err("fio: job startup hung? exiting.\n"); 1825 fio_terminate_threads(TERMINATE_ALL); 1826 fio_abort = 1; 1827 nr_started--; 1828 break; 1829 } 1830 dprint(FD_MUTEX, "done waiting on startup_mutex\n"); 1831 } 1832 1833 /* 1834 * Wait for the started threads to transition to 1835 * TD_INITIALIZED. 1836 */ 1837 fio_gettime(&this_start, NULL); 1838 left = this_jobs; 1839 while (left && !fio_abort) { 1840 if (mtime_since_now(&this_start) > JOB_START_TIMEOUT) 1841 break; 1842 1843 do_usleep(100000); 1844 1845 for (i = 0; i < this_jobs; i++) { 1846 td = map[i]; 1847 if (!td) 1848 continue; 1849 if (td->runstate == TD_INITIALIZED) { 1850 map[i] = NULL; 1851 left--; 1852 } else if (td->runstate >= TD_EXITED) { 1853 map[i] = NULL; 1854 left--; 1855 todo--; 1856 nr_running++; /* work-around... */ 1857 } 1858 } 1859 } 1860 1861 if (left) { 1862 log_err("fio: %d job%s failed to start\n", left, 1863 left > 1 ? "s" : ""); 1864 for (i = 0; i < this_jobs; i++) { 1865 td = map[i]; 1866 if (!td) 1867 continue; 1868 kill(td->pid, SIGTERM); 1869 } 1870 break; 1871 } 1872 1873 /* 1874 * start created threads (TD_INITIALIZED -> TD_RUNNING). 1875 */ 1876 for_each_td(td, i) { 1877 if (td->runstate != TD_INITIALIZED) 1878 continue; 1879 1880 if (in_ramp_time(td)) 1881 td_set_runstate(td, TD_RAMP); 1882 else 1883 td_set_runstate(td, TD_RUNNING); 1884 nr_running++; 1885 nr_started--; 1886 m_rate += ddir_rw_sum(td->o.ratemin); 1887 t_rate += ddir_rw_sum(td->o.rate); 1888 todo--; 1889 fio_mutex_up(td->mutex); 1890 } 1891 1892 reap_threads(&nr_running, &t_rate, &m_rate); 1893 1894 if (todo) 1895 do_usleep(100000); 1896 } 1897 1898 while (nr_running) { 1899 reap_threads(&nr_running, &t_rate, &m_rate); 1900 do_usleep(10000); 1901 } 1902 1903 fio_idle_prof_stop(); 1904 1905 update_io_ticks(); 1906} 1907 1908void wait_for_disk_thread_exit(void) 1909{ 1910 fio_mutex_down(disk_thread_mutex); 1911} 1912 1913static void free_disk_util(void) 1914{ 1915 disk_util_start_exit(); 1916 wait_for_disk_thread_exit(); 1917 disk_util_prune_entries(); 1918} 1919 1920static void *disk_thread_main(void *data) 1921{ 1922 int ret = 0; 1923 1924 fio_mutex_up(startup_mutex); 1925 1926 while (threads && !ret) { 1927 usleep(DISK_UTIL_MSEC * 1000); 1928 if (!threads) 1929 break; 1930 ret = update_io_ticks(); 1931 1932 if (!is_backend) 1933 print_thread_status(); 1934 } 1935 1936 fio_mutex_up(disk_thread_mutex); 1937 return NULL; 1938} 1939 1940static int create_disk_util_thread(void) 1941{ 1942 int ret; 1943 1944 setup_disk_util(); 1945 1946 disk_thread_mutex = fio_mutex_init(FIO_MUTEX_LOCKED); 1947 1948 ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL); 1949 if (ret) { 1950 fio_mutex_remove(disk_thread_mutex); 1951 log_err("Can't create disk util thread: %s\n", strerror(ret)); 1952 return 1; 1953 } 1954 1955 ret = pthread_detach(disk_util_thread); 1956 if (ret) { 1957 fio_mutex_remove(disk_thread_mutex); 1958 log_err("Can't detatch disk util thread: %s\n", strerror(ret)); 1959 return 1; 1960 } 1961 1962 dprint(FD_MUTEX, "wait on startup_mutex\n"); 1963 fio_mutex_down(startup_mutex); 1964 dprint(FD_MUTEX, "done waiting on startup_mutex\n"); 1965 return 0; 1966} 1967 1968int fio_backend(void) 1969{ 1970 struct thread_data *td; 1971 int i; 1972 1973 if (exec_profile) { 1974 if (load_profile(exec_profile)) 1975 return 1; 1976 free(exec_profile); 1977 exec_profile = NULL; 1978 } 1979 if (!thread_number) 1980 return 0; 1981 1982 if (write_bw_log) { 1983 setup_log(&agg_io_log[DDIR_READ], 0, IO_LOG_TYPE_BW); 1984 setup_log(&agg_io_log[DDIR_WRITE], 0, IO_LOG_TYPE_BW); 1985 setup_log(&agg_io_log[DDIR_TRIM], 0, IO_LOG_TYPE_BW); 1986 } 1987 1988 startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED); 1989 if (startup_mutex == NULL) 1990 return 1; 1991 writeout_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED); 1992 if (writeout_mutex == NULL) 1993 return 1; 1994 1995 set_genesis_time(); 1996 stat_init(); 1997 create_disk_util_thread(); 1998 1999 cgroup_list = smalloc(sizeof(*cgroup_list)); 2000 INIT_FLIST_HEAD(cgroup_list); 2001 2002 run_threads(); 2003 2004 if (!fio_abort) { 2005 show_run_stats(); 2006 if (write_bw_log) { 2007 __finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log"); 2008 __finish_log(agg_io_log[DDIR_WRITE], 2009 "agg-write_bw.log"); 2010 __finish_log(agg_io_log[DDIR_TRIM], 2011 "agg-write_bw.log"); 2012 } 2013 } 2014 2015 for_each_td(td, i) 2016 fio_options_free(td); 2017 2018 free_disk_util(); 2019 cgroup_kill(cgroup_list); 2020 sfree(cgroup_list); 2021 sfree(cgroup_mnt); 2022 2023 fio_mutex_remove(startup_mutex); 2024 fio_mutex_remove(writeout_mutex); 2025 fio_mutex_remove(disk_thread_mutex); 2026 stat_exit(); 2027 return exit_value; 2028} 2029