/* time.c revision 5ec10eaad3b09875b91e19a20bbdfa06f2117562 */
1#include <time.h> 2#include <sys/time.h> 3 4#include "fio.h" 5 6static struct timeval genesis; 7static unsigned long ns_granularity; 8 9unsigned long long utime_since(struct timeval *s, struct timeval *e) 10{ 11 long sec, usec; 12 unsigned long long ret; 13 14 sec = e->tv_sec - s->tv_sec; 15 usec = e->tv_usec - s->tv_usec; 16 if (sec > 0 && usec < 0) { 17 sec--; 18 usec += 1000000; 19 } 20 21 /* 22 * time warp bug on some kernels? 23 */ 24 if (sec < 0 || (sec == 0 && usec < 0)) 25 return 0; 26 27 ret = sec * 1000000ULL + usec; 28 29 return ret; 30} 31 32unsigned long long utime_since_now(struct timeval *s) 33{ 34 struct timeval t; 35 36 fio_gettime(&t, NULL); 37 return utime_since(s, &t); 38} 39 40unsigned long mtime_since(struct timeval *s, struct timeval *e) 41{ 42 long sec, usec, ret; 43 44 sec = e->tv_sec - s->tv_sec; 45 usec = e->tv_usec - s->tv_usec; 46 if (sec > 0 && usec < 0) { 47 sec--; 48 usec += 1000000; 49 } 50 51 sec *= 1000UL; 52 usec /= 1000UL; 53 ret = sec + usec; 54 55 /* 56 * time warp bug on some kernels? 
57 */ 58 if (ret < 0) 59 ret = 0; 60 61 return ret; 62} 63 64unsigned long mtime_since_now(struct timeval *s) 65{ 66 struct timeval t; 67 void *p = __builtin_return_address(0); 68 69 fio_gettime(&t, p); 70 return mtime_since(s, &t); 71} 72 73unsigned long time_since_now(struct timeval *s) 74{ 75 return mtime_since_now(s) / 1000; 76} 77 78/* 79 * busy looping version for the last few usec 80 */ 81void __usec_sleep(unsigned int usec) 82{ 83 struct timeval start; 84 85 fio_gettime(&start, NULL); 86 while (utime_since_now(&start) < usec) 87 nop; 88} 89 90void usec_sleep(struct thread_data *td, unsigned long usec) 91{ 92 struct timespec req; 93 struct timeval tv; 94 95 do { 96 unsigned long ts = usec; 97 98 if (usec < ns_granularity) { 99 __usec_sleep(usec); 100 break; 101 } 102 103 ts = usec - ns_granularity; 104 105 if (ts >= 1000000) { 106 req.tv_sec = ts / 1000000; 107 ts -= 1000000 * req.tv_sec; 108 } else 109 req.tv_sec = 0; 110 111 req.tv_nsec = ts * 1000; 112 fio_gettime(&tv, NULL); 113 114 if (nanosleep(&req, NULL) < 0) 115 break; 116 117 ts = utime_since_now(&tv); 118 if (ts >= usec) 119 break; 120 121 usec -= ts; 122 } while (!td->terminate); 123} 124 125void rate_throttle(struct thread_data *td, unsigned long time_spent, 126 unsigned int bytes) 127{ 128 unsigned long usec_cycle; 129 unsigned int bs; 130 131 if (!td->o.rate && !td->o.rate_iops) 132 return; 133 134 if (td_rw(td)) 135 bs = td->o.rw_min_bs; 136 else if (td_read(td)) 137 bs = td->o.min_bs[DDIR_READ]; 138 else 139 bs = td->o.min_bs[DDIR_WRITE]; 140 141 usec_cycle = td->rate_usec_cycle * (bytes / bs); 142 143 if (time_spent < usec_cycle) { 144 unsigned long s = usec_cycle - time_spent; 145 146 td->rate_pending_usleep += s; 147 148 if (td->rate_pending_usleep >= 100000) { 149 struct timeval t; 150 151 fio_gettime(&t, NULL); 152 usec_sleep(td, td->rate_pending_usleep); 153 td->rate_pending_usleep -= utime_since_now(&t); 154 } 155 } else { 156 long overtime = time_spent - usec_cycle; 157 158 
td->rate_pending_usleep -= overtime; 159 } 160} 161 162unsigned long mtime_since_genesis(void) 163{ 164 return mtime_since_now(&genesis); 165} 166 167static void fio_init time_init(void) 168{ 169 int i; 170 171 /* 172 * Check the granularity of the nanosleep function 173 */ 174 for (i = 0; i < 10; i++) { 175 struct timeval tv; 176 struct timespec ts; 177 unsigned long elapsed; 178 179 fio_gettime(&tv, NULL); 180 ts.tv_sec = 0; 181 ts.tv_nsec = 1000; 182 183 nanosleep(&ts, NULL); 184 elapsed = utime_since_now(&tv); 185 186 if (elapsed > ns_granularity) 187 ns_granularity = elapsed; 188 } 189} 190 191void set_genesis_time(void) 192{ 193 fio_gettime(&genesis, NULL); 194} 195 196void fill_start_time(struct timeval *t) 197{ 198 memcpy(t, &genesis, sizeof(genesis)); 199} 200