backend.c revision f3afa57e36550288340f1b6c694f354ae72654b9
1/* 2 * fio - the flexible io tester 3 * 4 * Copyright (C) 2005 Jens Axboe <axboe@suse.de> 5 * Copyright (C) 2006-2012 Jens Axboe <axboe@kernel.dk> 6 * 7 * The license below covers all files distributed with fio unless otherwise 8 * noted in the file itself. 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as 12 * published by the Free Software Foundation. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public License 20 * along with this program; if not, write to the Free Software 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * 23 */ 24#include <unistd.h> 25#include <fcntl.h> 26#include <string.h> 27#include <limits.h> 28#include <signal.h> 29#include <time.h> 30#include <locale.h> 31#include <assert.h> 32#include <time.h> 33#include <inttypes.h> 34#include <sys/stat.h> 35#include <sys/wait.h> 36#include <sys/ipc.h> 37#include <sys/shm.h> 38#include <sys/mman.h> 39 40#include "fio.h" 41#include "hash.h" 42#include "smalloc.h" 43#include "verify.h" 44#include "trim.h" 45#include "diskutil.h" 46#include "cgroup.h" 47#include "profile.h" 48#include "lib/rand.h" 49#include "memalign.h" 50#include "server.h" 51 52static pthread_t disk_util_thread; 53static struct fio_mutex *disk_thread_mutex; 54static struct fio_mutex *startup_mutex; 55static struct fio_mutex *writeout_mutex; 56static struct flist_head *cgroup_list; 57static char *cgroup_mnt; 58static int exit_value; 59static volatile int fio_abort; 60 61struct io_log *agg_io_log[DDIR_RWDIR_CNT]; 62 63int groupid = 0; 64unsigned int thread_number = 0; 65unsigned int nr_process = 0; 66unsigned int nr_thread = 0; 67int shm_id = 0; 68int temp_stall_ts; 69unsigned long done_secs = 0; 70 71#define PAGE_ALIGN(buf) \ 72 (char *) (((uintptr_t) (buf) + page_mask) & ~page_mask) 73 74#define JOB_START_TIMEOUT (5 * 1000) 75 76static void sig_int(int sig) 77{ 78 if (threads) { 79 if (is_backend) 80 fio_server_got_signal(sig); 81 else { 82 log_info("\nfio: terminating on signal %d\n", sig); 83 fflush(stdout); 84 exit_value = 128; 85 } 86 87 fio_terminate_threads(TERMINATE_ALL); 88 } 89} 90 91static void sig_show_status(int sig) 92{ 93 show_running_run_stats(); 94} 95 96static void set_sig_handlers(void) 97{ 98 struct sigaction act; 99 100 memset(&act, 0, sizeof(act)); 101 act.sa_handler = sig_int; 102 act.sa_flags = SA_RESTART; 103 sigaction(SIGINT, &act, NULL); 104 105 memset(&act, 0, sizeof(act)); 106 act.sa_handler = sig_int; 107 act.sa_flags = SA_RESTART; 108 sigaction(SIGTERM, &act, NULL); 109 110 memset(&act, 0, sizeof(act)); 111 act.sa_handler = sig_show_status; 112 act.sa_flags = SA_RESTART; 113 sigaction(SIGUSR1, &act, NULL); 114 115 if (is_backend) { 116 memset(&act, 0, sizeof(act)); 117 act.sa_handler = sig_int; 118 act.sa_flags = SA_RESTART; 119 sigaction(SIGPIPE, &act, NULL); 120 } 121} 122 123/* 124 * Check if we are above the minimum rate given. 125 */ 126static int __check_min_rate(struct thread_data *td, struct timeval *now, 127 enum fio_ddir ddir) 128{ 129 unsigned long long bytes = 0; 130 unsigned long iops = 0; 131 unsigned long spent; 132 unsigned long rate; 133 unsigned int ratemin = 0; 134 unsigned int rate_iops = 0; 135 unsigned int rate_iops_min = 0; 136 137 assert(ddir_rw(ddir)); 138 139 if (!td->o.ratemin[ddir] && !td->o.rate_iops_min[ddir]) 140 return 0; 141 142 /* 143 * allow a 2 second settle period in the beginning 144 */ 145 if (mtime_since(&td->start, now) < 2000) 146 return 0; 147 148 iops += td->this_io_blocks[ddir]; 149 bytes += td->this_io_bytes[ddir]; 150 ratemin += td->o.ratemin[ddir]; 151 rate_iops += td->o.rate_iops[ddir]; 152 rate_iops_min += td->o.rate_iops_min[ddir]; 153 154 /* 155 * if rate blocks is set, sample is running 156 */ 157 if (td->rate_bytes[ddir] || td->rate_blocks[ddir]) { 158 spent = mtime_since(&td->lastrate[ddir], now); 159 if (spent < td->o.ratecycle) 160 return 0; 161 162 if (td->o.rate[ddir]) { 163 /* 164 * check bandwidth specified rate 165 */ 166 if (bytes < td->rate_bytes[ddir]) { 167 log_err("%s: min rate %u not met\n", td->o.name, 168 ratemin); 169 return 1; 170 } else { 171 rate = ((bytes - td->rate_bytes[ddir]) * 1000) / spent; 172 if (rate < ratemin || 173 bytes < td->rate_bytes[ddir]) { 174 log_err("%s: min rate %u not met, got" 175 " %luKB/sec\n", td->o.name, 176 ratemin, rate); 177 return 1; 178 } 179 } 180 } else { 181 /* 182 * checks iops specified rate 183 */ 184 if (iops < rate_iops) { 185 log_err("%s: min iops rate %u not met\n", 186 td->o.name, rate_iops); 187 return 1; 188 } else { 189 rate = ((iops - td->rate_blocks[ddir]) * 1000) / spent; 190 if (rate < rate_iops_min || 191 iops < td->rate_blocks[ddir]) { 192 log_err("%s: min iops rate %u not met," 193 " got %lu\n", td->o.name, 194 rate_iops_min, rate); 195 } 196 } 197 } 198 } 199 200 td->rate_bytes[ddir] = bytes; 201 td->rate_blocks[ddir] = iops; 202 memcpy(&td->lastrate[ddir], now, sizeof(*now)); 203 return 0; 204} 205 206static int check_min_rate(struct thread_data *td, struct timeval *now, 207 unsigned long *bytes_done) 208{ 209 int ret = 0; 210 211 if (bytes_done[DDIR_READ]) 212 ret |= __check_min_rate(td, now, DDIR_READ); 213 if (bytes_done[DDIR_WRITE]) 214 ret |= __check_min_rate(td, now, DDIR_WRITE); 215 if (bytes_done[DDIR_TRIM]) 216 ret |= __check_min_rate(td, now, DDIR_TRIM); 217 218 return ret; 219} 220 221/* 222 * When job exits, we can cancel the in-flight IO if we are using async 223 * io. Attempt to do so. 224 */ 225static void cleanup_pending_aio(struct thread_data *td) 226{ 227 struct flist_head *entry, *n; 228 struct io_u *io_u; 229 int r; 230 231 /* 232 * get immediately available events, if any 233 */ 234 r = io_u_queued_complete(td, 0, NULL); 235 if (r < 0) 236 return; 237 238 /* 239 * now cancel remaining active events 240 */ 241 if (td->io_ops->cancel) { 242 flist_for_each_safe(entry, n, &td->io_u_busylist) { 243 io_u = flist_entry(entry, struct io_u, list); 244 245 /* 246 * if the io_u isn't in flight, then that generally 247 * means someone leaked an io_u. complain but fix 248 * it up, so we don't stall here. 249 */ 250 if ((io_u->flags & IO_U_F_FLIGHT) == 0) { 251 log_err("fio: non-busy IO on busy list\n"); 252 put_io_u(td, io_u); 253 } else { 254 r = td->io_ops->cancel(td, io_u); 255 if (!r) 256 put_io_u(td, io_u); 257 } 258 } 259 } 260 261 if (td->cur_depth) 262 r = io_u_queued_complete(td, td->cur_depth, NULL); 263} 264 265/* 266 * Helper to handle the final sync of a file. Works just like the normal 267 * io path, just does everything sync. 268 */ 269static int fio_io_sync(struct thread_data *td, struct fio_file *f) 270{ 271 struct io_u *io_u = __get_io_u(td); 272 int ret; 273 274 if (!io_u) 275 return 1; 276 277 io_u->ddir = DDIR_SYNC; 278 io_u->file = f; 279 280 if (td_io_prep(td, io_u)) { 281 put_io_u(td, io_u); 282 return 1; 283 } 284 285requeue: 286 ret = td_io_queue(td, io_u); 287 if (ret < 0) { 288 td_verror(td, io_u->error, "td_io_queue"); 289 put_io_u(td, io_u); 290 return 1; 291 } else if (ret == FIO_Q_QUEUED) { 292 if (io_u_queued_complete(td, 1, NULL) < 0) 293 return 1; 294 } else if (ret == FIO_Q_COMPLETED) { 295 if (io_u->error) { 296 td_verror(td, io_u->error, "td_io_queue"); 297 return 1; 298 } 299 300 if (io_u_sync_complete(td, io_u, NULL) < 0) 301 return 1; 302 } else if (ret == FIO_Q_BUSY) { 303 if (td_io_commit(td)) 304 return 1; 305 goto requeue; 306 } 307 308 return 0; 309} 310 311static inline void __update_tv_cache(struct thread_data *td) 312{ 313 fio_gettime(&td->tv_cache, NULL); 314} 315 316static inline void update_tv_cache(struct thread_data *td) 317{ 318 if ((++td->tv_cache_nr & td->tv_cache_mask) == td->tv_cache_mask) 319 __update_tv_cache(td); 320} 321 322static inline int runtime_exceeded(struct thread_data *td, struct timeval *t) 323{ 324 if (in_ramp_time(td)) 325 return 0; 326 if (!td->o.timeout) 327 return 0; 328 if (mtime_since(&td->epoch, t) >= td->o.timeout * 1000) 329 return 1; 330 331 return 0; 332} 333 334static int break_on_this_error(struct thread_data *td, enum fio_ddir ddir, 335 int *retptr) 336{ 337 int ret = *retptr; 338 339 if (ret < 0 || td->error) { 340 int err; 341 342 if (ret < 0) 343 err = -ret; 344 else 345 err = td->error; 346 347 if (!(td->o.continue_on_error & td_error_type(ddir, err))) 348 return 1; 349 350 if (td_non_fatal_error(err)) { 351 /* 352 * Continue with the I/Os in case of 353 * a non fatal error. 354 */ 355 update_error_count(td, err); 356 td_clear_error(td); 357 *retptr = 0; 358 return 0; 359 } else if (td->o.fill_device && err == ENOSPC) { 360 /* 361 * We expect to hit this error if 362 * fill_device option is set. 363 */ 364 td_clear_error(td); 365 td->terminate = 1; 366 return 1; 367 } else { 368 /* 369 * Stop the I/O in case of a fatal 370 * error. 371 */ 372 update_error_count(td, err); 373 return 1; 374 } 375 } 376 377 return 0; 378} 379 380/* 381 * The main verify engine. Runs over the writes we previously submitted, 382 * reads the blocks back in, and checks the crc/md5 of the data. 383 */ 384static void do_verify(struct thread_data *td) 385{ 386 struct fio_file *f; 387 struct io_u *io_u; 388 int ret, min_events; 389 unsigned int i; 390 391 dprint(FD_VERIFY, "starting loop\n"); 392 393 /* 394 * sync io first and invalidate cache, to make sure we really 395 * read from disk. 396 */ 397 for_each_file(td, f, i) { 398 if (!fio_file_open(f)) 399 continue; 400 if (fio_io_sync(td, f)) 401 break; 402 if (file_invalidate_cache(td, f)) 403 break; 404 } 405 406 if (td->error) 407 return; 408 409 td_set_runstate(td, TD_VERIFYING); 410 411 io_u = NULL; 412 while (!td->terminate) { 413 int ret2, full; 414 415 update_tv_cache(td); 416 417 if (runtime_exceeded(td, &td->tv_cache)) { 418 __update_tv_cache(td); 419 if (runtime_exceeded(td, &td->tv_cache)) { 420 td->terminate = 1; 421 break; 422 } 423 } 424 425 if (flow_threshold_exceeded(td)) 426 continue; 427 428 io_u = __get_io_u(td); 429 if (!io_u) 430 break; 431 432 if (get_next_verify(td, io_u)) { 433 put_io_u(td, io_u); 434 break; 435 } 436 437 if (td_io_prep(td, io_u)) { 438 put_io_u(td, io_u); 439 break; 440 } 441 442 if (td->o.verify_async) 443 io_u->end_io = verify_io_u_async; 444 else 445 io_u->end_io = verify_io_u; 446 447 ret = td_io_queue(td, io_u); 448 switch (ret) { 449 case FIO_Q_COMPLETED: 450 if (io_u->error) { 451 ret = -io_u->error; 452 clear_io_u(td, io_u); 453 } else if (io_u->resid) { 454 int bytes = io_u->xfer_buflen - io_u->resid; 455 456 /* 457 * zero read, fail 458 */ 459 if (!bytes) { 460 td_verror(td, EIO, "full resid"); 461 put_io_u(td, io_u); 462 break; 463 } 464 465 io_u->xfer_buflen = io_u->resid; 466 io_u->xfer_buf += bytes; 467 io_u->offset += bytes; 468 469 if (ddir_rw(io_u->ddir)) 470 td->ts.short_io_u[io_u->ddir]++; 471 472 f = io_u->file; 473 if (io_u->offset == f->real_file_size) 474 goto sync_done; 475 476 requeue_io_u(td, &io_u); 477 } else { 478sync_done: 479 ret = io_u_sync_complete(td, io_u, NULL); 480 if (ret < 0) 481 break; 482 } 483 continue; 484 case FIO_Q_QUEUED: 485 break; 486 case FIO_Q_BUSY: 487 requeue_io_u(td, &io_u); 488 ret2 = td_io_commit(td); 489 if (ret2 < 0) 490 ret = ret2; 491 break; 492 default: 493 assert(ret < 0); 494 td_verror(td, -ret, "td_io_queue"); 495 break; 496 } 497 498 if (break_on_this_error(td, io_u->ddir, &ret)) 499 break; 500 501 /* 502 * if we can queue more, do so. but check if there are 503 * completed io_u's first. Note that we can get BUSY even 504 * without IO queued, if the system is resource starved. 505 */ 506 full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth); 507 if (full || !td->o.iodepth_batch_complete) { 508 min_events = min(td->o.iodepth_batch_complete, 509 td->cur_depth); 510 /* 511 * if the queue is full, we MUST reap at least 1 event 512 */ 513 if (full && !min_events) 514 min_events = 1; 515 516 do { 517 /* 518 * Reap required number of io units, if any, 519 * and do the verification on them through 520 * the callback handler 521 */ 522 if (io_u_queued_complete(td, min_events, NULL) < 0) { 523 ret = -1; 524 break; 525 } 526 } while (full && (td->cur_depth > td->o.iodepth_low)); 527 } 528 if (ret < 0) 529 break; 530 } 531 532 if (!td->error) { 533 min_events = td->cur_depth; 534 535 if (min_events) 536 ret = io_u_queued_complete(td, min_events, NULL); 537 } else 538 cleanup_pending_aio(td); 539 540 td_set_runstate(td, TD_RUNNING); 541 542 dprint(FD_VERIFY, "exiting loop\n"); 543} 544 545static int io_bytes_exceeded(struct thread_data *td) 546{ 547 unsigned long long bytes; 548 549 if (td_rw(td)) 550 bytes = td->this_io_bytes[DDIR_READ] + td->this_io_bytes[DDIR_WRITE]; 551 else if (td_write(td)) 552 bytes = td->this_io_bytes[DDIR_WRITE]; 553 else if (td_read(td)) 554 bytes = td->this_io_bytes[DDIR_READ]; 555 else 556 bytes = td->this_io_bytes[DDIR_TRIM]; 557 558 return bytes >= td->o.size; 559} 560 561/* 562 * Main IO worker function. It retrieves io_u's to process and queues 563 * and reaps them, checking for rate and errors along the way. 564 */ 565static void do_io(struct thread_data *td) 566{ 567 unsigned int i; 568 int ret = 0; 569 570 if (in_ramp_time(td)) 571 td_set_runstate(td, TD_RAMP); 572 else 573 td_set_runstate(td, TD_RUNNING); 574 575 while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) || 576 (!flist_empty(&td->trim_list)) || !io_bytes_exceeded(td) || 577 td->o.time_based) { 578 struct timeval comp_time; 579 unsigned long bytes_done[DDIR_RWDIR_CNT] = { 0, 0, 0 }; 580 int min_evts = 0; 581 struct io_u *io_u; 582 int ret2, full; 583 enum fio_ddir ddir; 584 585 if (td->terminate) 586 break; 587 588 update_tv_cache(td); 589 590 if (runtime_exceeded(td, &td->tv_cache)) { 591 __update_tv_cache(td); 592 if (runtime_exceeded(td, &td->tv_cache)) { 593 td->terminate = 1; 594 break; 595 } 596 } 597 598 if (flow_threshold_exceeded(td)) 599 continue; 600 601 io_u = get_io_u(td); 602 if (!io_u) 603 break; 604 605 ddir = io_u->ddir; 606 607 /* 608 * Add verification end_io handler if: 609 * - Asked to verify (!td_rw(td)) 610 * - Or the io_u is from our verify list (mixed write/ver) 611 */ 612 if (td->o.verify != VERIFY_NONE && io_u->ddir == DDIR_READ && 613 ((io_u->flags & IO_U_F_VER_LIST) || !td_rw(td))) { 614 if (td->o.verify_async) 615 io_u->end_io = verify_io_u_async; 616 else 617 io_u->end_io = verify_io_u; 618 td_set_runstate(td, TD_VERIFYING); 619 } else if (in_ramp_time(td)) 620 td_set_runstate(td, TD_RAMP); 621 else 622 td_set_runstate(td, TD_RUNNING); 623 624 ret = td_io_queue(td, io_u); 625 switch (ret) { 626 case FIO_Q_COMPLETED: 627 if (io_u->error) { 628 ret = -io_u->error; 629 clear_io_u(td, io_u); 630 } else if (io_u->resid) { 631 int bytes = io_u->xfer_buflen - io_u->resid; 632 struct fio_file *f = io_u->file; 633 634 /* 635 * zero read, fail 636 */ 637 if (!bytes) { 638 td_verror(td, EIO, "full resid"); 639 put_io_u(td, io_u); 640 break; 641 } 642 643 io_u->xfer_buflen = io_u->resid; 644 io_u->xfer_buf += bytes; 645 io_u->offset += bytes; 646 647 if (ddir_rw(io_u->ddir)) 648 td->ts.short_io_u[io_u->ddir]++; 649 650 if (io_u->offset == f->real_file_size) 651 goto sync_done; 652 653 requeue_io_u(td, &io_u); 654 } else { 655sync_done: 656 if (__should_check_rate(td, DDIR_READ) || 657 __should_check_rate(td, DDIR_WRITE) || 658 __should_check_rate(td, DDIR_TRIM)) 659 fio_gettime(&comp_time, NULL); 660 661 ret = io_u_sync_complete(td, io_u, bytes_done); 662 if (ret < 0) 663 break; 664 } 665 break; 666 case FIO_Q_QUEUED: 667 /* 668 * if the engine doesn't have a commit hook, 669 * the io_u is really queued. if it does have such 670 * a hook, it has to call io_u_queued() itself. 671 */ 672 if (td->io_ops->commit == NULL) 673 io_u_queued(td, io_u); 674 break; 675 case FIO_Q_BUSY: 676 requeue_io_u(td, &io_u); 677 ret2 = td_io_commit(td); 678 if (ret2 < 0) 679 ret = ret2; 680 break; 681 default: 682 assert(ret < 0); 683 put_io_u(td, io_u); 684 break; 685 } 686 687 if (break_on_this_error(td, ddir, &ret)) 688 break; 689 690 /* 691 * See if we need to complete some commands. Note that we 692 * can get BUSY even without IO queued, if the system is 693 * resource starved. 694 */ 695 full = queue_full(td) || (ret == FIO_Q_BUSY && td->cur_depth); 696 if (full || !td->o.iodepth_batch_complete) { 697 min_evts = min(td->o.iodepth_batch_complete, 698 td->cur_depth); 699 /* 700 * if the queue is full, we MUST reap at least 1 event 701 */ 702 if (full && !min_evts) 703 min_evts = 1; 704 705 if (__should_check_rate(td, DDIR_READ) || 706 __should_check_rate(td, DDIR_WRITE) || 707 __should_check_rate(td, DDIR_TRIM)) 708 fio_gettime(&comp_time, NULL); 709 710 do { 711 ret = io_u_queued_complete(td, min_evts, bytes_done); 712 if (ret < 0) 713 break; 714 715 } while (full && (td->cur_depth > td->o.iodepth_low)); 716 } 717 718 if (ret < 0) 719 break; 720 if (!ddir_rw_sum(bytes_done)) 721 continue; 722 723 if (!in_ramp_time(td) && should_check_rate(td, bytes_done)) { 724 if (check_min_rate(td, &comp_time, bytes_done)) { 725 if (exitall_on_terminate) 726 fio_terminate_threads(td->groupid); 727 td_verror(td, EIO, "check_min_rate"); 728 break; 729 } 730 } 731 732 if (td->o.thinktime) { 733 unsigned long long b; 734 735 b = ddir_rw_sum(td->io_blocks); 736 if (!(b % td->o.thinktime_blocks)) { 737 int left; 738 739 if (td->o.thinktime_spin) 740 usec_spin(td->o.thinktime_spin); 741 742 left = td->o.thinktime - td->o.thinktime_spin; 743 if (left) 744 usec_sleep(td, left); 745 } 746 } 747 } 748 749 if (td->trim_entries) 750 log_err("fio: %d trim entries leaked?\n", td->trim_entries); 751 752 if (td->o.fill_device && td->error == ENOSPC) { 753 td->error = 0; 754 td->terminate = 1; 755 } 756 if (!td->error) { 757 struct fio_file *f; 758 759 i = td->cur_depth; 760 if (i) { 761 ret = io_u_queued_complete(td, i, NULL); 762 if (td->o.fill_device && td->error == ENOSPC) 763 td->error = 0; 764 } 765 766 if (should_fsync(td) && td->o.end_fsync) { 767 td_set_runstate(td, TD_FSYNCING); 768 769 for_each_file(td, f, i) { 770 if (!fio_file_open(f)) 771 continue; 772 fio_io_sync(td, f); 773 } 774 } 775 } else 776 cleanup_pending_aio(td); 777 778 /* 779 * stop job if we failed doing any IO 780 */ 781 if (!ddir_rw_sum(td->this_io_bytes)) 782 td->done = 1; 783} 784 785static void cleanup_io_u(struct thread_data *td) 786{ 787 struct flist_head *entry, *n; 788 struct io_u *io_u; 789 790 flist_for_each_safe(entry, n, &td->io_u_freelist) { 791 io_u = flist_entry(entry, struct io_u, list); 792 793 flist_del(&io_u->list); 794 fio_memfree(io_u, sizeof(*io_u)); 795 } 796 797 free_io_mem(td); 798} 799 800static int init_io_u(struct thread_data *td) 801{ 802 struct io_u *io_u; 803 unsigned int max_bs, min_write; 804 int cl_align, i, max_units; 805 char *p; 806 807 max_units = td->o.iodepth; 808 max_bs = max(td->o.max_bs[DDIR_READ], td->o.max_bs[DDIR_WRITE]); 809 max_bs = max(td->o.max_bs[DDIR_TRIM], max_bs); 810 min_write = td->o.min_bs[DDIR_WRITE]; 811 td->orig_buffer_size = (unsigned long long) max_bs 812 * (unsigned long long) max_units; 813 814 if (td->o.mem_type == MEM_SHMHUGE || td->o.mem_type == MEM_MMAPHUGE) { 815 unsigned long bs; 816 817 bs = td->orig_buffer_size + td->o.hugepage_size - 1; 818 td->orig_buffer_size = bs & ~(td->o.hugepage_size - 1); 819 } 820 821 if (td->orig_buffer_size != (size_t) td->orig_buffer_size) { 822 log_err("fio: IO memory too large. Reduce max_bs or iodepth\n"); 823 return 1; 824 } 825 826 if (allocate_io_mem(td)) 827 return 1; 828 829 if (td->o.odirect || td->o.mem_align || 830 (td->io_ops->flags & FIO_RAWIO)) 831 p = PAGE_ALIGN(td->orig_buffer) + td->o.mem_align; 832 else 833 p = td->orig_buffer; 834 835 cl_align = os_cache_line_size(); 836 837 for (i = 0; i < max_units; i++) { 838 void *ptr; 839 840 if (td->terminate) 841 return 1; 842 843 ptr = fio_memalign(cl_align, sizeof(*io_u)); 844 if (!ptr) { 845 log_err("fio: unable to allocate aligned memory\n"); 846 break; 847 } 848 849 io_u = ptr; 850 memset(io_u, 0, sizeof(*io_u)); 851 INIT_FLIST_HEAD(&io_u->list); 852 dprint(FD_MEM, "io_u alloc %p, index %u\n", io_u, i); 853 854 if (!(td->io_ops->flags & FIO_NOIO)) { 855 io_u->buf = p; 856 dprint(FD_MEM, "io_u %p, mem %p\n", io_u, io_u->buf); 857 858 if (td_write(td)) 859 io_u_fill_buffer(td, io_u, min_write, max_bs); 860 if (td_write(td) && td->o.verify_pattern_bytes) { 861 /* 862 * Fill the buffer with the pattern if we are 863 * going to be doing writes. 864 */ 865 fill_pattern(td, io_u->buf, max_bs, io_u, 0, 0); 866 } 867 } 868 869 io_u->index = i; 870 io_u->flags = IO_U_F_FREE; 871 flist_add(&io_u->list, &td->io_u_freelist); 872 p += max_bs; 873 } 874 875 return 0; 876} 877 878static int switch_ioscheduler(struct thread_data *td) 879{ 880 char tmp[256], tmp2[128]; 881 FILE *f; 882 int ret; 883 884 if (td->io_ops->flags & FIO_DISKLESSIO) 885 return 0; 886 887 sprintf(tmp, "%s/queue/scheduler", td->sysfs_root); 888 889 f = fopen(tmp, "r+"); 890 if (!f) { 891 if (errno == ENOENT) { 892 log_err("fio: os or kernel doesn't support IO scheduler" 893 " switching\n"); 894 return 0; 895 } 896 td_verror(td, errno, "fopen iosched"); 897 return 1; 898 } 899 900 /* 901 * Set io scheduler. 902 */ 903 ret = fwrite(td->o.ioscheduler, strlen(td->o.ioscheduler), 1, f); 904 if (ferror(f) || ret != 1) { 905 td_verror(td, errno, "fwrite"); 906 fclose(f); 907 return 1; 908 } 909 910 rewind(f); 911 912 /* 913 * Read back and check that the selected scheduler is now the default. 914 */ 915 ret = fread(tmp, 1, sizeof(tmp), f); 916 if (ferror(f) || ret < 0) { 917 td_verror(td, errno, "fread"); 918 fclose(f); 919 return 1; 920 } 921 922 sprintf(tmp2, "[%s]", td->o.ioscheduler); 923 if (!strstr(tmp, tmp2)) { 924 log_err("fio: io scheduler %s not found\n", td->o.ioscheduler); 925 td_verror(td, EINVAL, "iosched_switch"); 926 fclose(f); 927 return 1; 928 } 929 930 fclose(f); 931 return 0; 932} 933 934static int keep_running(struct thread_data *td) 935{ 936 if (td->done) 937 return 0; 938 if (td->o.time_based) 939 return 1; 940 if (td->o.loops) { 941 td->o.loops--; 942 return 1; 943 } 944 945 if (ddir_rw_sum(td->io_bytes) < td->o.size) 946 return 1; 947 948 return 0; 949} 950 951static int exec_string(const char *string) 952{ 953 int ret, newlen = strlen(string) + 1 + 8; 954 char *str; 955 956 str = malloc(newlen); 957 sprintf(str, "sh -c %s", string); 958 959 ret = system(str); 960 if (ret == -1) 961 log_err("fio: exec of cmd <%s> failed\n", str); 962 963 free(str); 964 return ret; 965} 966 967/* 968 * Entry point for the thread based jobs. The process based jobs end up 969 * here as well, after a little setup. 970 */ 971static void *thread_main(void *data) 972{ 973 unsigned long long elapsed; 974 struct thread_data *td = data; 975 pthread_condattr_t attr; 976 int clear_state; 977 978 if (!td->o.use_thread) { 979 setsid(); 980 td->pid = getpid(); 981 } else 982 td->pid = gettid(); 983 984 dprint(FD_PROCESS, "jobs pid=%d started\n", (int) td->pid); 985 986 INIT_FLIST_HEAD(&td->io_u_freelist); 987 INIT_FLIST_HEAD(&td->io_u_busylist); 988 INIT_FLIST_HEAD(&td->io_u_requeues); 989 INIT_FLIST_HEAD(&td->io_log_list); 990 INIT_FLIST_HEAD(&td->io_hist_list); 991 INIT_FLIST_HEAD(&td->verify_list); 992 INIT_FLIST_HEAD(&td->trim_list); 993 pthread_mutex_init(&td->io_u_lock, NULL); 994 td->io_hist_tree = RB_ROOT; 995 996 pthread_condattr_init(&attr); 997 pthread_cond_init(&td->verify_cond, &attr); 998 pthread_cond_init(&td->free_cond, &attr); 999 1000 td_set_runstate(td, TD_INITIALIZED); 1001 dprint(FD_MUTEX, "up startup_mutex\n"); 1002 fio_mutex_up(startup_mutex); 1003 dprint(FD_MUTEX, "wait on td->mutex\n"); 1004 fio_mutex_down(td->mutex); 1005 dprint(FD_MUTEX, "done waiting on td->mutex\n"); 1006 1007 /* 1008 * the ->mutex mutex is now no longer used, close it to avoid 1009 * eating a file descriptor 1010 */ 1011 fio_mutex_remove(td->mutex); 1012 1013 /* 1014 * A new gid requires privilege, so we need to do this before setting 1015 * the uid. 1016 */ 1017 if (td->o.gid != -1U && setgid(td->o.gid)) { 1018 td_verror(td, errno, "setgid"); 1019 goto err; 1020 } 1021 if (td->o.uid != -1U && setuid(td->o.uid)) { 1022 td_verror(td, errno, "setuid"); 1023 goto err; 1024 } 1025 1026 /* 1027 * If we have a gettimeofday() thread, make sure we exclude that 1028 * thread from this job 1029 */ 1030 if (td->o.gtod_cpu) 1031 fio_cpu_clear(&td->o.cpumask, td->o.gtod_cpu); 1032 1033 /* 1034 * Set affinity first, in case it has an impact on the memory 1035 * allocations. 1036 */ 1037 if (td->o.cpumask_set && fio_setaffinity(td->pid, td->o.cpumask) == -1) { 1038 td_verror(td, errno, "cpu_set_affinity"); 1039 goto err; 1040 } 1041 1042 /* 1043 * May alter parameters that init_io_u() will use, so we need to 1044 * do this first. 1045 */ 1046 if (init_iolog(td)) 1047 goto err; 1048 1049 if (init_io_u(td)) 1050 goto err; 1051 1052 if (td->o.verify_async && verify_async_init(td)) 1053 goto err; 1054 1055 if (td->ioprio_set) { 1056 if (ioprio_set(IOPRIO_WHO_PROCESS, 0, td->ioprio) == -1) { 1057 td_verror(td, errno, "ioprio_set"); 1058 goto err; 1059 } 1060 } 1061 1062 if (td->o.cgroup && cgroup_setup(td, cgroup_list, &cgroup_mnt)) 1063 goto err; 1064 1065 errno = 0; 1066 if (nice(td->o.nice) == -1 && errno != 0) { 1067 td_verror(td, errno, "nice"); 1068 goto err; 1069 } 1070 1071 if (td->o.ioscheduler && switch_ioscheduler(td)) 1072 goto err; 1073 1074 if (!td->o.create_serialize && setup_files(td)) 1075 goto err; 1076 1077 if (td_io_init(td)) 1078 goto err; 1079 1080 if (init_random_map(td)) 1081 goto err; 1082 1083 if (td->o.exec_prerun) { 1084 if (exec_string(td->o.exec_prerun)) 1085 goto err; 1086 } 1087 1088 if (td->o.pre_read) { 1089 if (pre_read_files(td) < 0) 1090 goto err; 1091 } 1092 1093 fio_gettime(&td->epoch, NULL); 1094 getrusage(RUSAGE_SELF, &td->ru_start); 1095 1096 clear_state = 0; 1097 while (keep_running(td)) { 1098 fio_gettime(&td->start, NULL); 1099 memcpy(&td->bw_sample_time, &td->start, sizeof(td->start)); 1100 memcpy(&td->iops_sample_time, &td->start, sizeof(td->start)); 1101 memcpy(&td->tv_cache, &td->start, sizeof(td->start)); 1102 1103 if (td->o.ratemin[DDIR_READ] || td->o.ratemin[DDIR_WRITE] || 1104 td->o.ratemin[DDIR_TRIM]) { 1105 memcpy(&td->lastrate[DDIR_READ], &td->bw_sample_time, 1106 sizeof(td->bw_sample_time)); 1107 memcpy(&td->lastrate[DDIR_WRITE], &td->bw_sample_time, 1108 sizeof(td->bw_sample_time)); 1109 memcpy(&td->lastrate[DDIR_TRIM], &td->bw_sample_time, 1110 sizeof(td->bw_sample_time)); 1111 } 1112 1113 if (clear_state) 1114 clear_io_state(td); 1115 1116 prune_io_piece_log(td); 1117 1118 do_io(td); 1119 1120 clear_state = 1; 1121 1122 if (td_read(td) && td->io_bytes[DDIR_READ]) { 1123 elapsed = utime_since_now(&td->start); 1124 td->ts.runtime[DDIR_READ] += elapsed; 1125 } 1126 if (td_write(td) && td->io_bytes[DDIR_WRITE]) { 1127 elapsed = utime_since_now(&td->start); 1128 td->ts.runtime[DDIR_WRITE] += elapsed; 1129 } 1130 if (td_trim(td) && td->io_bytes[DDIR_TRIM]) { 1131 elapsed = utime_since_now(&td->start); 1132 td->ts.runtime[DDIR_TRIM] += elapsed; 1133 } 1134 1135 if (td->error || td->terminate) 1136 break; 1137 1138 if (!td->o.do_verify || 1139 td->o.verify == VERIFY_NONE || 1140 (td->io_ops->flags & FIO_UNIDIR)) 1141 continue; 1142 1143 clear_io_state(td); 1144 1145 fio_gettime(&td->start, NULL); 1146 1147 do_verify(td); 1148 1149 td->ts.runtime[DDIR_READ] += utime_since_now(&td->start); 1150 1151 if (td->error || td->terminate) 1152 break; 1153 } 1154 1155 update_rusage_stat(td); 1156 td->ts.runtime[DDIR_READ] = (td->ts.runtime[DDIR_READ] + 999) / 1000; 1157 td->ts.runtime[DDIR_WRITE] = (td->ts.runtime[DDIR_WRITE] + 999) / 1000; 1158 td->ts.runtime[DDIR_TRIM] = (td->ts.runtime[DDIR_TRIM] + 999) / 1000; 1159 td->ts.total_run_time = mtime_since_now(&td->epoch); 1160 td->ts.io_bytes[DDIR_READ] = td->io_bytes[DDIR_READ]; 1161 td->ts.io_bytes[DDIR_WRITE] = td->io_bytes[DDIR_WRITE]; 1162 td->ts.io_bytes[DDIR_TRIM] = td->io_bytes[DDIR_TRIM]; 1163 1164 fio_mutex_down(writeout_mutex); 1165 if (td->bw_log) { 1166 if (td->o.bw_log_file) { 1167 finish_log_named(td, td->bw_log, 1168 td->o.bw_log_file, "bw"); 1169 } else 1170 finish_log(td, td->bw_log, "bw"); 1171 } 1172 if (td->lat_log) { 1173 if (td->o.lat_log_file) { 1174 finish_log_named(td, td->lat_log, 1175 td->o.lat_log_file, "lat"); 1176 } else 1177 finish_log(td, td->lat_log, "lat"); 1178 } 1179 if (td->slat_log) { 1180 if (td->o.lat_log_file) { 1181 finish_log_named(td, td->slat_log, 1182 td->o.lat_log_file, "slat"); 1183 } else 1184 finish_log(td, td->slat_log, "slat"); 1185 } 1186 if (td->clat_log) { 1187 if (td->o.lat_log_file) { 1188 finish_log_named(td, td->clat_log, 1189 td->o.lat_log_file, "clat"); 1190 } else 1191 finish_log(td, td->clat_log, "clat"); 1192 } 1193 if (td->iops_log) { 1194 if (td->o.iops_log_file) { 1195 finish_log_named(td, td->iops_log, 1196 td->o.iops_log_file, "iops"); 1197 } else 1198 finish_log(td, td->iops_log, "iops"); 1199 } 1200 1201 fio_mutex_up(writeout_mutex); 1202 if (td->o.exec_postrun) 1203 exec_string(td->o.exec_postrun); 1204 1205 if (exitall_on_terminate) 1206 fio_terminate_threads(td->groupid); 1207 1208err: 1209 if (td->error) 1210 log_info("fio: pid=%d, err=%d/%s\n", (int) td->pid, td->error, 1211 td->verror); 1212 1213 if (td->o.verify_async) 1214 verify_async_exit(td); 1215 1216 close_and_free_files(td); 1217 close_ioengine(td); 1218 cleanup_io_u(td); 1219 cgroup_shutdown(td, &cgroup_mnt); 1220 1221 if (td->o.cpumask_set) { 1222 int ret = fio_cpuset_exit(&td->o.cpumask); 1223 1224 td_verror(td, ret, "fio_cpuset_exit"); 1225 } 1226 1227 /* 1228 * do this very late, it will log file closing as well 1229 */ 1230 if (td->o.write_iolog_file) 1231 write_iolog_close(td); 1232 1233 td_set_runstate(td, TD_EXITED); 1234 return (void *) (uintptr_t) td->error; 1235} 1236 1237 1238/* 1239 * We cannot pass the td data into a forked process, so attach the td and 1240 * pass it to the thread worker. 1241 */ 1242static int fork_main(int shmid, int offset) 1243{ 1244 struct thread_data *td; 1245 void *data, *ret; 1246 1247#ifndef __hpux 1248 data = shmat(shmid, NULL, 0); 1249 if (data == (void *) -1) { 1250 int __err = errno; 1251 1252 perror("shmat"); 1253 return __err; 1254 } 1255#else 1256 /* 1257 * HP-UX inherits shm mappings? 1258 */ 1259 data = threads; 1260#endif 1261 1262 td = data + offset * sizeof(struct thread_data); 1263 ret = thread_main(td); 1264 shmdt(data); 1265 return (int) (uintptr_t) ret; 1266} 1267 1268/* 1269 * Run over the job map and reap the threads that have exited, if any. 1270 */ 1271static void reap_threads(unsigned int *nr_running, unsigned int *t_rate, 1272 unsigned int *m_rate) 1273{ 1274 struct thread_data *td; 1275 unsigned int cputhreads, realthreads, pending; 1276 int i, status, ret; 1277 1278 /* 1279 * reap exited threads (TD_EXITED -> TD_REAPED) 1280 */ 1281 realthreads = pending = cputhreads = 0; 1282 for_each_td(td, i) { 1283 int flags = 0; 1284 1285 /* 1286 * ->io_ops is NULL for a thread that has closed its 1287 * io engine 1288 */ 1289 if (td->io_ops && !strcmp(td->io_ops->name, "cpuio")) 1290 cputhreads++; 1291 else 1292 realthreads++; 1293 1294 if (!td->pid) { 1295 pending++; 1296 continue; 1297 } 1298 if (td->runstate == TD_REAPED) 1299 continue; 1300 if (td->o.use_thread) { 1301 if (td->runstate == TD_EXITED) { 1302 td_set_runstate(td, TD_REAPED); 1303 goto reaped; 1304 } 1305 continue; 1306 } 1307 1308 flags = WNOHANG; 1309 if (td->runstate == TD_EXITED) 1310 flags = 0; 1311 1312 /* 1313 * check if someone quit or got killed in an unusual way 1314 */ 1315 ret = waitpid(td->pid, &status, flags); 1316 if (ret < 0) { 1317 if (errno == ECHILD) { 1318 log_err("fio: pid=%d disappeared %d\n", 1319 (int) td->pid, td->runstate); 1320 td->sig = ECHILD; 1321 td_set_runstate(td, TD_REAPED); 1322 goto reaped; 1323 } 1324 perror("waitpid"); 1325 } else if (ret == td->pid) { 1326 if (WIFSIGNALED(status)) { 1327 int sig = WTERMSIG(status); 1328 1329 if (sig != SIGTERM) 1330 log_err("fio: pid=%d, got signal=%d\n", 1331 (int) td->pid, sig); 1332 td->sig = sig; 1333 td_set_runstate(td, TD_REAPED); 1334 goto reaped; 1335 } 1336 if (WIFEXITED(status)) { 1337 if (WEXITSTATUS(status) && !td->error) 1338 td->error = WEXITSTATUS(status); 1339 1340 td_set_runstate(td, TD_REAPED); 1341 goto reaped; 1342 } 1343 } 1344 1345 /* 1346 * thread is not dead, continue 1347 */ 1348 pending++; 1349 continue; 1350reaped: 1351 (*nr_running)--; 1352 (*m_rate) -= ddir_rw_sum(td->o.ratemin); 1353 (*t_rate) -= ddir_rw_sum(td->o.rate); 1354 if (!td->pid) 1355 pending--; 1356 1357 if (td->error) 1358 exit_value++; 1359 1360 done_secs += mtime_since_now(&td->epoch) / 1000; 1361 } 1362 1363 if (*nr_running == cputhreads && !pending && realthreads) 1364 fio_terminate_threads(TERMINATE_ALL); 1365} 1366 1367/* 1368 * Main function for kicking off and reaping jobs, as needed. 1369 */ 1370static void run_threads(void) 1371{ 1372 struct thread_data *td; 1373 unsigned long spent; 1374 unsigned int i, todo, nr_running, m_rate, t_rate, nr_started; 1375 1376 if (fio_pin_memory()) 1377 return; 1378 1379 if (fio_gtod_offload && fio_start_gtod_thread()) 1380 return; 1381 1382 set_sig_handlers(); 1383 1384 if (output_format == FIO_OUTPUT_NORMAL) { 1385 log_info("Starting "); 1386 if (nr_thread) 1387 log_info("%d thread%s", nr_thread, 1388 nr_thread > 1 ? "s" : ""); 1389 if (nr_process) { 1390 if (nr_thread) 1391 log_info(" and "); 1392 log_info("%d process%s", nr_process, 1393 nr_process > 1 ? "es" : ""); 1394 } 1395 log_info("\n"); 1396 fflush(stdout); 1397 } 1398 1399 todo = thread_number; 1400 nr_running = 0; 1401 nr_started = 0; 1402 m_rate = t_rate = 0; 1403 1404 for_each_td(td, i) { 1405 print_status_init(td->thread_number - 1); 1406 1407 if (!td->o.create_serialize) 1408 continue; 1409 1410 /* 1411 * do file setup here so it happens sequentially, 1412 * we don't want X number of threads getting their 1413 * client data interspersed on disk 1414 */ 1415 if (setup_files(td)) { 1416 exit_value++; 1417 if (td->error) 1418 log_err("fio: pid=%d, err=%d/%s\n", 1419 (int) td->pid, td->error, td->verror); 1420 td_set_runstate(td, TD_REAPED); 1421 todo--; 1422 } else { 1423 struct fio_file *f; 1424 unsigned int j; 1425 1426 /* 1427 * for sharing to work, each job must always open 1428 * its own files. so close them, if we opened them 1429 * for creation 1430 */ 1431 for_each_file(td, f, j) { 1432 if (fio_file_open(f)) 1433 td_io_close_file(td, f); 1434 } 1435 } 1436 } 1437 1438 set_genesis_time(); 1439 1440 while (todo) { 1441 struct thread_data *map[REAL_MAX_JOBS]; 1442 struct timeval this_start; 1443 int this_jobs = 0, left; 1444 1445 /* 1446 * create threads (TD_NOT_CREATED -> TD_CREATED) 1447 */ 1448 for_each_td(td, i) { 1449 if (td->runstate != TD_NOT_CREATED) 1450 continue; 1451 1452 /* 1453 * never got a chance to start, killed by other 1454 * thread for some reason 1455 */ 1456 if (td->terminate) { 1457 todo--; 1458 continue; 1459 } 1460 1461 if (td->o.start_delay) { 1462 spent = mtime_since_genesis(); 1463 1464 if (td->o.start_delay * 1000 > spent) 1465 continue; 1466 } 1467 1468 if (td->o.stonewall && (nr_started || nr_running)) { 1469 dprint(FD_PROCESS, "%s: stonewall wait\n", 1470 td->o.name); 1471 break; 1472 } 1473 1474 init_disk_util(td); 1475 1476 /* 1477 * Set state to created. Thread will transition 1478 * to TD_INITIALIZED when it's done setting up. 1479 */ 1480 td_set_runstate(td, TD_CREATED); 1481 map[this_jobs++] = td; 1482 nr_started++; 1483 1484 if (td->o.use_thread) { 1485 int ret; 1486 1487 dprint(FD_PROCESS, "will pthread_create\n"); 1488 ret = pthread_create(&td->thread, NULL, 1489 thread_main, td); 1490 if (ret) { 1491 log_err("pthread_create: %s\n", 1492 strerror(ret)); 1493 nr_started--; 1494 break; 1495 } 1496 ret = pthread_detach(td->thread); 1497 if (ret) 1498 log_err("pthread_detach: %s", 1499 strerror(ret)); 1500 } else { 1501 pid_t pid; 1502 dprint(FD_PROCESS, "will fork\n"); 1503 pid = fork(); 1504 if (!pid) { 1505 int ret = fork_main(shm_id, i); 1506 1507 _exit(ret); 1508 } else if (i == fio_debug_jobno) 1509 *fio_debug_jobp = pid; 1510 } 1511 dprint(FD_MUTEX, "wait on startup_mutex\n"); 1512 if (fio_mutex_down_timeout(startup_mutex, 10)) { 1513 log_err("fio: job startup hung? exiting.\n"); 1514 fio_terminate_threads(TERMINATE_ALL); 1515 fio_abort = 1; 1516 nr_started--; 1517 break; 1518 } 1519 dprint(FD_MUTEX, "done waiting on startup_mutex\n"); 1520 } 1521 1522 /* 1523 * Wait for the started threads to transition to 1524 * TD_INITIALIZED. 1525 */ 1526 fio_gettime(&this_start, NULL); 1527 left = this_jobs; 1528 while (left && !fio_abort) { 1529 if (mtime_since_now(&this_start) > JOB_START_TIMEOUT) 1530 break; 1531 1532 usleep(100000); 1533 1534 for (i = 0; i < this_jobs; i++) { 1535 td = map[i]; 1536 if (!td) 1537 continue; 1538 if (td->runstate == TD_INITIALIZED) { 1539 map[i] = NULL; 1540 left--; 1541 } else if (td->runstate >= TD_EXITED) { 1542 map[i] = NULL; 1543 left--; 1544 todo--; 1545 nr_running++; /* work-around... */ 1546 } 1547 } 1548 } 1549 1550 if (left) { 1551 log_err("fio: %d job%s failed to start\n", left, 1552 left > 1 ? "s" : ""); 1553 for (i = 0; i < this_jobs; i++) { 1554 td = map[i]; 1555 if (!td) 1556 continue; 1557 kill(td->pid, SIGTERM); 1558 } 1559 break; 1560 } 1561 1562 /* 1563 * start created threads (TD_INITIALIZED -> TD_RUNNING). 1564 */ 1565 for_each_td(td, i) { 1566 if (td->runstate != TD_INITIALIZED) 1567 continue; 1568 1569 if (in_ramp_time(td)) 1570 td_set_runstate(td, TD_RAMP); 1571 else 1572 td_set_runstate(td, TD_RUNNING); 1573 nr_running++; 1574 nr_started--; 1575 m_rate += ddir_rw_sum(td->o.ratemin); 1576 t_rate += ddir_rw_sum(td->o.rate); 1577 todo--; 1578 fio_mutex_up(td->mutex); 1579 } 1580 1581 reap_threads(&nr_running, &t_rate, &m_rate); 1582 1583 if (todo) { 1584 if (is_backend) 1585 fio_server_idle_loop(); 1586 else 1587 usleep(100000); 1588 } 1589 } 1590 1591 while (nr_running) { 1592 reap_threads(&nr_running, &t_rate, &m_rate); 1593 1594 if (is_backend) 1595 fio_server_idle_loop(); 1596 else 1597 usleep(10000); 1598 } 1599 1600 update_io_ticks(); 1601 fio_unpin_memory(); 1602} 1603 1604void wait_for_disk_thread_exit(void) 1605{ 1606 fio_mutex_down(disk_thread_mutex); 1607} 1608 1609static void *disk_thread_main(void *data) 1610{ 1611 int ret = 0; 1612 1613 fio_mutex_up(startup_mutex); 1614 1615 while (threads && !ret) { 1616 usleep(DISK_UTIL_MSEC * 1000); 1617 if (!threads) 1618 break; 1619 ret = update_io_ticks(); 1620 1621 if (!is_backend) 1622 print_thread_status(); 1623 } 1624 1625 fio_mutex_up(disk_thread_mutex); 1626 return NULL; 1627} 1628 1629static int create_disk_util_thread(void) 1630{ 1631 int ret; 1632 1633 setup_disk_util(); 1634 1635 disk_thread_mutex = fio_mutex_init(FIO_MUTEX_LOCKED); 1636 1637 ret = pthread_create(&disk_util_thread, NULL, disk_thread_main, NULL); 1638 if (ret) { 1639 fio_mutex_remove(disk_thread_mutex); 1640 log_err("Can't create disk util thread: %s\n", strerror(ret)); 1641 return 1; 1642 } 1643 1644 ret = pthread_detach(disk_util_thread); 1645 if (ret) { 1646 fio_mutex_remove(disk_thread_mutex); 1647 log_err("Can't detatch disk util thread: %s\n", strerror(ret)); 1648 return 1; 1649 } 1650 1651 dprint(FD_MUTEX, "wait on startup_mutex\n"); 1652 fio_mutex_down(startup_mutex); 1653 dprint(FD_MUTEX, "done waiting on startup_mutex\n"); 1654 return 0; 1655} 1656 1657int fio_backend(void) 1658{ 1659 struct thread_data *td; 1660 int i; 1661 1662 if (exec_profile) { 1663 if (load_profile(exec_profile)) 1664 return 1; 1665 free(exec_profile); 1666 exec_profile = NULL; 1667 } 1668 if (!thread_number) 1669 return 0; 1670 1671 if (write_bw_log) { 1672 setup_log(&agg_io_log[DDIR_READ], 0); 1673 setup_log(&agg_io_log[DDIR_WRITE], 0); 1674 setup_log(&agg_io_log[DDIR_TRIM], 0); 1675 } 1676 1677 startup_mutex = fio_mutex_init(FIO_MUTEX_LOCKED); 1678 if (startup_mutex == NULL) 1679 return 1; 1680 writeout_mutex = fio_mutex_init(FIO_MUTEX_UNLOCKED); 1681 if (writeout_mutex == NULL) 1682 return 1; 1683 1684 set_genesis_time(); 1685 create_disk_util_thread(); 1686 1687 cgroup_list = smalloc(sizeof(*cgroup_list)); 1688 INIT_FLIST_HEAD(cgroup_list); 1689 1690 run_threads(); 1691 1692 if (!fio_abort) { 1693 show_run_stats(); 1694 if (write_bw_log) { 1695 __finish_log(agg_io_log[DDIR_READ], "agg-read_bw.log"); 1696 __finish_log(agg_io_log[DDIR_WRITE], 1697 "agg-write_bw.log"); 1698 __finish_log(agg_io_log[DDIR_TRIM], 1699 "agg-write_bw.log"); 1700 } 1701 } 1702 1703 for_each_td(td, i) 1704 fio_options_free(td); 1705 1706 free_disk_util(); 1707 cgroup_kill(cgroup_list); 1708 sfree(cgroup_list); 1709 sfree(cgroup_mnt); 1710 1711 fio_mutex_remove(startup_mutex); 1712 fio_mutex_remove(writeout_mutex); 1713 fio_mutex_remove(disk_thread_mutex); 1714 return exit_value; 1715} 1716