/* cpuidle.c - revision bf4d1b5ddb78f86078ac6ae0415802d5f0c68f92 */
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}

void disable_cpuidle(void)
{
        off = 1;
}

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
{
        struct cpuidle_state *target_state = &drv->states[index];

        return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int index)
{
        return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Only returns in case of an error or when no state supporting
 * ->enter_dead is available; on success the CPU does not come back.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int i, dead_state = -1;
        int power_usage = INT_MAX;

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];

                if (s->power_usage < power_usage && s->enter_dead) {
                        power_usage = s->power_usage;
                        dead_state = i;
                }
        }

        if (dead_state != -1)
                return drv->states[dead_state].enter_dead(dev, dead_state);

        return -ENODEV;
}
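/*
 * For reference, a minimal sketch (hypothetical, not part of this file) of
 * the driver side of the callbacks used above: cpuidle_enter() dispatches
 * to ->enter, and cpuidle_play_dead() picks the lowest-power state that
 * provides ->enter_dead. All my_* names and numeric values are
 * illustrative only.
 *
 *      static int my_enter(struct cpuidle_device *dev,
 *                          struct cpuidle_driver *drv, int index)
 *      {
 *              // put the hardware into the state selected by 'index'
 *              return index;   // report the state actually entered
 *      }
 *
 *      static struct cpuidle_driver my_driver = {
 *              .name             = "my_idle",
 *              .owner            = THIS_MODULE,
 *              .en_core_tk_irqen = 1,  // core does timekeeping/irq enable
 *              .states[0] = {
 *                      .name             = "C1",
 *                      .exit_latency     = 1,          // usec
 *                      .target_residency = 10,         // usec
 *                      .power_usage      = 1000,
 *                      .enter            = my_enter,
 *                      .enter_dead       = my_enter_dead,
 *              },
 *              .state_count = 1,
 *      };
 */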
/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @next_state: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        int next_state)
{
        int entered_state;

        entered_state = cpuidle_enter_ops(dev, drv, next_state);

        if (entered_state >= 0) {
                /*
                 * Update cpuidle counters. This could be done in each
                 * driver's enter routine instead, but that would duplicate
                 * the same code in every driver.
                 */
                dev->states_usage[entered_state].time +=
                                (unsigned long long)dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv;
        int next_state, entered_state;

        if (off)
                return -ENODEV;

        if (!initialized)
                return -ENODEV;

        /* check if the device is ready */
        if (!dev || !dev->enabled)
                return -EBUSY;

        drv = cpuidle_get_cpu_driver(dev);

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(drv, dev);
        if (need_resched()) {
                dev->last_residency = 0;
                /* give the governor an opportunity to reflect on the outcome */
                if (cpuidle_curr_governor->reflect)
                        cpuidle_curr_governor->reflect(dev, next_state);
                local_irq_enable();
                return 0;
        }

        trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
        trace_cpu_idle_rcuidle(next_state, dev->cpu);

        if (cpuidle_state_is_coupled(dev, drv, next_state))
                entered_state = cpuidle_enter_state_coupled(dev, drv,
                                                            next_state);
        else
                entered_state = cpuidle_enter_state(dev, drv, next_state);

        trace_power_end_rcuidle(dev->cpu);
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev, entered_state);

        return 0;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                kick_all_cpus_sync();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}
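/*
 * A sketch of the expected call site for cpuidle_idle_call() above: an
 * architecture idle loop tries cpuidle first and falls back to its own
 * default routine when the call returns non-zero (cpuidle off, not yet
 * initialized, or the per-CPU device not enabled). default_idle() stands
 * in for whatever the architecture provides and is assumed to re-enable
 * interrupts itself.
 *
 *      local_irq_disable();
 *      if (!need_resched()) {
 *              if (cpuidle_idle_call())
 *                      default_idle();  // arch-specific fallback
 *      } else {
 *              local_irq_enable();
 *      }
 */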
/**
 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 * @enter: callback that actually enters the state
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
                       struct cpuidle_driver *drv, int index,
                       int (*enter)(struct cpuidle_device *dev,
                                    struct cpuidle_driver *drv, int index))
{
        ktime_t time_start, time_end;
        s64 diff;

        time_start = ktime_get();

        index = enter(dev, drv, index);

        time_end = ktime_get();

        local_irq_enable();

        diff = ktime_to_us(ktime_sub(time_end, time_start));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
                     struct cpuidle_driver *drv, int index)
{
        ktime_t t1, t2;
        s64 diff;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
        struct cpuidle_state *state = &drv->states[0];

        snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = 0;
        state->enter = poll_idle;
        state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;
        struct cpuidle_driver *drv;

        if (!dev)
                return -EINVAL;

        if (dev->enabled)
                return 0;

        drv = cpuidle_get_cpu_driver(dev);

        if (!drv || !cpuidle_curr_governor)
                return -EIO;

        if (!dev->state_count)
                dev->state_count = drv->state_count;

        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                if (ret)
                        return ret;
        }

        cpuidle_enter_ops = drv->en_core_tk_irqen ?
                cpuidle_enter_tk : cpuidle_enter;

        poll_idle_init(drv);

        ret = cpuidle_add_device_sysfs(dev);
        if (ret)
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;

        for (i = 0; i < dev->state_count; i++) {
                dev->states_usage[i].usage = 0;
                dev->states_usage[i].time = 0;
        }
        dev->last_residency = 0;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_device_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
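/*
 * As the kernel-doc above and below requires, external callers bracket
 * cpuidle_enable_device()/cpuidle_disable_device() with the pause/resume
 * helpers so no CPU runs the idle handler while a device is reconfigured.
 * A minimal sketch, with error handling omitted:
 *
 *      cpuidle_pause_and_lock();
 *      cpuidle_disable_device(dev);
 *      // ... update the device/driver state information ...
 *      cpuidle_enable_device(dev);
 *      cpuidle_resume_and_unlock();
 */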
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!dev || !dev->enabled)
                return;

        if (!drv || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(drv, dev);

        cpuidle_remove_device_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!try_module_get(drv->owner))
                return -EINVAL;

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        ret = cpuidle_add_sysfs(dev);
        if (ret)
                goto err_sysfs;

        ret = cpuidle_coupled_register_device(dev);
        if (ret)
                goto err_coupled;

        dev->registered = 1;
        return 0;

err_coupled:
        cpuidle_remove_sysfs(dev);
err_sysfs:
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);
        return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;

        if (!dev)
                return -EINVAL;

        mutex_lock(&cpuidle_lock);

        if ((ret = __cpuidle_register_device(dev))) {
                mutex_unlock(&cpuidle_lock);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(dev);
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_coupled_unregister_device(dev);

        cpuidle_resume_and_unlock();

        module_put(drv->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
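/*
 * Typical driver-side registration, sketched: a driver first registers a
 * struct cpuidle_driver (via cpuidle_register_driver(), defined elsewhere
 * in this directory) and then one cpuidle_device per CPU through
 * cpuidle_register_device() above. my_driver and my_device are
 * hypothetical names:
 *
 *      static DEFINE_PER_CPU(struct cpuidle_device, my_device);
 *      int cpu, ret;
 *
 *      ret = cpuidle_register_driver(&my_driver);
 *      if (ret)
 *              return ret;
 *
 *      for_each_possible_cpu(cpu) {
 *              struct cpuidle_device *dev = &per_cpu(my_device, cpu);
 *
 *              dev->cpu = cpu;
 *              ret = cpuidle_register_device(dev);
 *              if (ret)
 *                      break;  // unwind with cpuidle_unregister_device()
 *      }
 */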
#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                                  unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
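/*
 * The latency notifier registered above fires whenever a PM QoS CPU/DMA
 * latency request changes anywhere in the kernel. A sketch of the
 * requester side (the request object and the 20 usec bound are
 * illustrative):
 *
 *      static struct pm_qos_request my_qos;
 *
 *      pm_qos_add_request(&my_qos, PM_QOS_CPU_DMA_LATENCY, 20);
 *      // ... latency-sensitive section: the IPI above has kicked CPUs
 *      //     out of deep C-states, and governors honor the new bound ...
 *      pm_qos_remove_request(&my_qos);
 *
 * Note that because this file is always built in, the "off" parameter is
 * set from the kernel command line as cpuidle.off=1 rather than as a
 * loadable-module option.
 */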