cpuidle.c revision 4126c0197bc8c58a0bb7fcda07b01b596b6fb4c5
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}

void disable_cpuidle(void)
{
        off = 1;
}

#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
        cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
{
        struct cpuidle_state *target_state = &drv->states[index];
        return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int index)
{
        return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or when no driver is registered;
 * on success, the chosen state's enter_dead() callback does not return.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_driver();
        int i, dead_state = -1;
        int power_usage = INT_MAX;      /* higher than any real state's power */

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];

                if (s->power_usage < power_usage && s->enter_dead) {
                        power_usage = s->power_usage;
                        dead_state = i;
                }
        }

        if (dead_state != -1)
                return drv->states[dead_state].enter_dead(dev, dead_state);

        return -ENODEV;
}
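/*
 * Illustrative sketch, not part of this file: a minimal driver state that
 * cpuidle_play_dead() could pick.  Everything prefixed my_ is hypothetical;
 * cpu_do_idle() stands in for whatever arch-specific low-power wait the
 * platform provides.  An ->enter_dead() callback is expected never to
 * return.
 */
#if 0
static int my_enter(struct cpuidle_device *dev,
                    struct cpuidle_driver *drv, int index)
{
        cpu_do_idle();          /* arch-specific wait-for-interrupt */
        return index;
}

static int my_enter_dead(struct cpuidle_device *dev, int index)
{
        while (1)               /* park the offlined CPU for good */
                cpu_do_idle();
}

static struct cpuidle_driver my_driver = {
        .name           = "my_idle",
        .owner          = THIS_MODULE,
        .states[0]      = {
                .name                   = "WFI",
                .desc                   = "wait for interrupt",
                .exit_latency           = 1,
                .target_residency       = 1,
                .power_usage            = 100,
                .flags                  = CPUIDLE_FLAG_TIME_VALID,
                .enter                  = my_enter,
                .enter_dead             = my_enter_dead,
        },
        .state_count    = 1,
};
#endif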
/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @next_state: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        int next_state)
{
        int entered_state;

        entered_state = cpuidle_enter_ops(dev, drv, next_state);

        if (entered_state >= 0) {
                /*
                 * Update cpuidle counters.  This could be done in each
                 * driver's enter routine instead, but that would duplicate
                 * the same code in every driver.
                 */
                dev->states_usage[entered_state].time +=
                                (unsigned long long)dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_driver();
        int next_state, entered_state;

        if (off)
                return -ENODEV;

        if (!initialized)
                return -ENODEV;

        /* check if the device is ready */
        if (!dev || !dev->enabled)
                return -EBUSY;

#if 0
        /* shows regressions, re-enable for 2.6.29 */
        /*
         * run any timers that can be run now, at this point
         * before calculating the idle duration etc.
         */
        hrtimer_peek_ahead_timers();
#endif

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(drv, dev);
        if (need_resched()) {
                local_irq_enable();
                return 0;
        }

        trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
        trace_cpu_idle_rcuidle(next_state, dev->cpu);

        if (cpuidle_state_is_coupled(dev, drv, next_state))
                entered_state = cpuidle_enter_state_coupled(dev, drv,
                                                            next_state);
        else
                entered_state = cpuidle_enter_state(dev, drv, next_state);

        trace_power_end_rcuidle(dev->cpu);
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev, entered_state);

        return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /* Make sure all changes are visible before switching to the new idle handler */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                cpuidle_kick_cpus();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
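/*
 * Illustrative sketch, not part of this file: code outside cpuidle (the
 * ACPI processor driver does this, for example) must bracket any
 * reprogramming of a device's states with the pause/resume pair above, so
 * that no CPU is executing the idle handler while the states change.
 * my_rebuild_states() is a hypothetical placeholder.
 */
#if 0
static void my_change_states(struct cpuidle_device *dev)
{
        cpuidle_pause_and_lock();       /* idle handler uninstalled here */
        cpuidle_disable_device(dev);
        my_rebuild_states(dev);         /* hypothetical reprogramming step */
        cpuidle_enable_device(dev);
        cpuidle_resume_and_unlock();    /* idle handler reinstalled */
}
#endif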
/**
 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 * @enter: the state-entry function to wrap
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
                       struct cpuidle_driver *drv, int index,
                       int (*enter)(struct cpuidle_device *dev,
                                    struct cpuidle_driver *drv, int index))
{
        ktime_t time_start, time_end;
        s64 diff;

        time_start = ktime_get();

        index = enter(dev, drv, index);

        time_end = ktime_get();

        local_irq_enable();

        diff = ktime_to_us(ktime_sub(time_end, time_start));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
                     struct cpuidle_driver *drv, int index)
{
        ktime_t t1, t2;
        s64 diff;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
        struct cpuidle_state *state = &drv->states[0];

        snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = 0;
        state->enter = poll_idle;
        state->disable = 0;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;
        struct cpuidle_driver *drv = cpuidle_get_driver();

        if (dev->enabled)
                return 0;
        if (!drv || !cpuidle_curr_governor)
                return -EIO;
        if (!dev->state_count)
                dev->state_count = drv->state_count;

        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                if (ret)
                        return ret;
        }

        cpuidle_enter_ops = drv->en_core_tk_irqen ?
                cpuidle_enter_tk : cpuidle_enter;

        poll_idle_init(drv);

        if ((ret = cpuidle_add_state_sysfs(dev)))
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;

        for (i = 0; i < dev->state_count; i++) {
                dev->states_usage[i].usage = 0;
                dev->states_usage[i].time = 0;
        }
        dev->last_residency = 0;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_state_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
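/*
 * Illustrative sketch, not part of this file: when a driver sets
 * drv->en_core_tk_irqen, cpuidle_enter_tk() routes every state entry
 * through cpuidle_wrap_enter() above, so the ->enter() callback can skip
 * its own residency measurement and final local_irq_enable().
 * my_wait_for_interrupt() is a hypothetical low-power wait.
 */
#if 0
static int my_simple_enter(struct cpuidle_device *dev,
                           struct cpuidle_driver *drv, int index)
{
        /* Interrupts are still disabled here; the core re-enables them. */
        my_wait_for_interrupt();
        return index;   /* report the state actually entered */
}
#endif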
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        if (!dev->enabled)
                return;
        if (!cpuidle_get_driver() || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

        cpuidle_remove_state_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal registration function, called by
 * the register and enable paths before a device is enabled
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct device *cpu_dev;
        struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

        if (!dev)
                return -EINVAL;

        cpu_dev = get_cpu_device((unsigned long)dev->cpu);

        if (!try_module_get(cpuidle_driver->owner))
                return -EINVAL;

        init_completion(&dev->kobj_unregister);

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        ret = cpuidle_add_sysfs(cpu_dev);
        if (ret)
                goto err_sysfs;

        ret = cpuidle_coupled_register_device(dev);
        if (ret)
                goto err_coupled;

        dev->registered = 1;
        return 0;

err_coupled:
        cpuidle_remove_sysfs(cpu_dev);
        wait_for_completion(&dev->kobj_unregister);
err_sysfs:
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(cpuidle_driver->owner);
        return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;

        mutex_lock(&cpuidle_lock);

        if ((ret = __cpuidle_register_device(dev))) {
                mutex_unlock(&cpuidle_lock);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
        struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(cpu_dev);
        list_del(&dev->device_list);
        wait_for_completion(&dev->kobj_unregister);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_coupled_unregister_device(dev);

        cpuidle_resume_and_unlock();

        module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
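/*
 * Illustrative sketch, not part of this file: a typical platform init
 * sequence registers the driver once, then one cpuidle_device per possible
 * CPU.  my_driver is the hypothetical driver sketched earlier in this
 * file; my_cpuidle_dev is likewise made up, and error unwinding is
 * simplified for brevity.
 */
#if 0
static DEFINE_PER_CPU(struct cpuidle_device, my_cpuidle_dev);

static int __init my_cpuidle_init(void)
{
        int cpu, ret;

        ret = cpuidle_register_driver(&my_driver);
        if (ret)
                return ret;

        for_each_possible_cpu(cpu) {
                struct cpuidle_device *dev = &per_cpu(my_cpuidle_dev, cpu);

                dev->cpu = cpu;
                ret = cpuidle_register_device(dev);
                if (ret) {
                        cpuidle_unregister_driver(&my_driver);
                        return ret;
                }
        }
        return 0;
}
#endif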
#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                                  unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
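/*
 * Illustrative sketch, not part of this file: how other kernel code ends
 * up in cpuidle_latency_notify() above.  Adding or updating a
 * PM_QOS_CPU_DMA_LATENCY request fires the pm_qos notifier chain, which
 * IPIs every CPU out of its current C-state.  The my_* names are
 * hypothetical.
 */
#if 0
static struct pm_qos_request my_qos_req;

static void my_latency_critical_begin(void)
{
        /* tolerate at most 20us of wakeup latency from now on */
        pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void my_latency_critical_end(void)
{
        pm_qos_remove_request(&my_qos_req);
}
#endif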