cpuidle.c revision 47182668ca140ae067d5961ec8c59edf646b36c7
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Only returns in case of an error or if no state provides enter_dead
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/* Update cpuidle counters */
		/* This could be done in each driver's enter routine,
		 * but that would result in multiple copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	drv = cpuidle_get_cpu_driver(dev);

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
				   &dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
				   &dev->cpu);

	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		kick_all_cpus_sync();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
	state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	if (!dev->state_count)
		dev->state_count = drv->state_count;

	poll_idle_init(drv);

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
}

static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister: unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function covers the common
 * initialization pattern found in the arch-specific drivers. The per-CPU
 * devices are defined globally in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On multiplatform for ARM, the coupled idle states could be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use it. Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
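
For context, the sketch below shows how an architecture-specific driver typically consumes this API: it fills in a struct cpuidle_driver and hands it to cpuidle_register(), which registers the driver and one of the per-CPU cpuidle_dev devices defined above for each CPU in drv->cpumask. The driver name, state parameters and the my_idle_enter() callback are illustrative assumptions for this example, not part of cpuidle.c; teardown would go through cpuidle_unregister().

/*
 * Illustrative only: a minimal cpuidle driver against the API in this file.
 * The state values and my_idle_enter() are made up for the example.
 */
#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>

static int my_idle_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	/* Architecture-specific low-power entry would go here. */
	return index;	/* report the state that was actually entered */
}

static struct cpuidle_driver my_idle_driver = {
	.name  = "my_idle",
	.owner = THIS_MODULE,
	/* states must be ordered in decreasing power consumption */
	.states[0] = {
		.name			= "C1",
		.desc			= "example shallow idle state",
		.exit_latency		= 1,	/* us, illustrative */
		.target_residency	= 2,	/* us, illustrative */
		.enter			= my_idle_enter,
	},
	.state_count = 1,
};

static int __init my_idle_init(void)
{
	/* NULL: no coupled states; drv->cpumask defaults to all possible CPUs */
	return cpuidle_register(&my_idle_driver, NULL);
}
device_initcall(my_idle_init);	/* a loadable module would use module_init() */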