/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400


/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimate is rather optimistic. To get a
 * more realistic estimate, a correction factor, based on historic behavior,
 * is applied to the estimate. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor; however, it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: we track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have
 * (this input is currently disabled, see performance_multiplier() below).
 * a value of 10 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */
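
/*
 * Worked example (editorial illustration, not part of the original
 * source): an expected idle period of 300us with no IO outstanding
 * lands in bucket 2 (see which_bucket() below); the same period with
 * IO outstanding lands in bucket BUCKETS/2 + 2 = 8, so the two
 * situations learn separate correction factors.
 */
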
struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	u64		predicted_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	u64		correction_factor[BUCKETS];
	u32		intervals[INTERVALS];
	int		interval_ptr;
};


#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	/*
	 * this doesn't work as intended - it is almost always 0, but can
	 * sometimes, depending on workload, spike very high into the hundreds
	 * even when the average cpu load is under 10%.
	 */
	/* mult += 2 * get_loadavg(); */

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}
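
/*
 * Worked example (editorial illustration, not part of the original
 * source): with two tasks waiting for IO on this CPU,
 * performance_multiplier() returns 1 + 10 * 2 = 21.  menu_select()
 * below skips any state whose exit_latency * 21 exceeds predicted_us,
 * so a state with a 100us exit latency needs a predicted idle period
 * of at least 2100us to remain a candidate.
 */
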
static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static void detect_repeating_patterns(struct menu_device *data)
{
	int i;
	uint64_t avg = 0;
	uint64_t stddev = 0; /* contains the square of the std deviation */

	/* first calculate average and standard deviation of the past */
	for (i = 0; i < INTERVALS; i++)
		avg += data->intervals[i];
	avg = avg / INTERVALS;

	/* if the avg is beyond the known next tick, it's worthless */
	if (avg > data->expected_us)
		return;

	for (i = 0; i < INTERVALS; i++)
		stddev += (data->intervals[i] - avg) *
			  (data->intervals[i] - avg);

	stddev = stddev / INTERVALS;

	/*
	 * now.. if stddev is small.. then assume we have a
	 * repeating pattern and predict we keep doing this.
	 */

	if (avg && stddev < STDDEV_THRESH)
		data->predicted_us = avg;
}
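
/*
 * Editorial note (not part of the original source): the stddev
 * variable above holds the variance, i.e. the square of the standard
 * deviation, so STDDEV_THRESH = 400 corresponds to a standard
 * deviation of 20us.  Eight recent intervals of, say, 990..1010us
 * would pass the check and yield a prediction of roughly 1000us.
 */
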
/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int power_usage = -1;
	int i;
	int multiplier;
	struct timespec t;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;


	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);

	detect_repeating_patterns(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really, really soon.
	 */
	if (data->expected_us > 5 &&
	    drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.  (power_usage was initialized to -1 above;
	 * the cpuidle core fills in negative, strictly decreasing
	 * placeholder values for drivers that do not specify power
	 * numbers, so deeper states compare as lower power here.)
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (s->disable)
			continue;
		if (s->target_residency > data->predicted_us)
			continue;
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		if (s->power_usage < power_usage) {
			power_usage = s->power_usage;
			data->last_state_idx = i;
			data->exit_us = s->exit_latency;
		}
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}
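
/*
 * Worked example of the decaying average (editorial illustration, not
 * part of the original source): menu_update() below computes
 *
 *	new_factor = old_factor * (DECAY - 1) / DECAY
 *			+ RESOLUTION * measured_us / expected_us;
 *
 * If the CPU keeps waking after exactly half of the expected time,
 * the factor converges to the fixed point f = f * 7/8 + 512, i.e.
 * f = 4096, which is half of the unity value RESOLUTION * DECAY =
 * 8192 -- so menu_select() then predicts half of the timer-based
 * estimate.
 */
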
/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	u64 new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark.  As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;


	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;


	/* update our correction ratio */

	new_factor = data->correction_factor[data->bucket]
			* (DECAY - 1) / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time.
	 */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);