main.c revision 1a9a91525d806f2b3bd8b57b963755a96fd36ce2
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

/* All registered devices, parents before children (depth-first order). */
LIST_HEAD(dpm_list);
/* Devices that have gone through ->prepare() in the current transition. */
LIST_HEAD(dpm_prepared_list);
/* Devices that have gone through the "regular" suspend callbacks. */
LIST_HEAD(dpm_suspended_list);
/* Devices that have gone through the "noirq" (late suspend) callbacks. */
LIST_HEAD(dpm_noirq_list);

/* Protects all of the dpm_*_list heads above. */
static DEFINE_MUTEX(dpm_list_mtx);
/* Transition currently in progress; read by the async suspend/resume paths. */
static pm_message_t pm_transition;

/* First error reported by an asynchronous suspend/resume thread, if any. */
static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	/* Start "done" so dpm_wait() on a freshly added device never blocks. */
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	/* A prepared parent should not be gaining new children; warn if so. */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	/* Release anyone blocked in dpm_wait() on this device. */
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

/*
 * initcall_debug_start - Note the start time of a device PM callback.
 *
 * Returns the current time when initcall debugging is enabled, or a zero
 * ktime_t otherwise (the zero value is never read back in that case).
 */
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i\n",
			dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

/*
 * initcall_debug_report - Report how long a device PM callback took.
 *
 * Only prints when initcall debugging is enabled; ns >> 10 approximates
 * microseconds cheaply.
 */
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

/* device_for_each_child() callback wrapping dpm_wait(). */
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

/* Wait for the PM operations of all of @dev's children to complete. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	/* Dispatch on the transition type; an absent callback succeeds as 0. */
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
286 */ 287static int pm_noirq_op(struct device *dev, 288 const struct dev_pm_ops *ops, 289 pm_message_t state) 290{ 291 int error = 0; 292 ktime_t calltime = ktime_set(0, 0), delta, rettime; 293 294 if (initcall_debug) { 295 pr_info("calling %s+ @ %i, parent: %s\n", 296 dev_name(dev), task_pid_nr(current), 297 dev->parent ? dev_name(dev->parent) : "none"); 298 calltime = ktime_get(); 299 } 300 301 switch (state.event) { 302#ifdef CONFIG_SUSPEND 303 case PM_EVENT_SUSPEND: 304 if (ops->suspend_noirq) { 305 error = ops->suspend_noirq(dev); 306 suspend_report_result(ops->suspend_noirq, error); 307 } 308 break; 309 case PM_EVENT_RESUME: 310 if (ops->resume_noirq) { 311 error = ops->resume_noirq(dev); 312 suspend_report_result(ops->resume_noirq, error); 313 } 314 break; 315#endif /* CONFIG_SUSPEND */ 316#ifdef CONFIG_HIBERNATE_CALLBACKS 317 case PM_EVENT_FREEZE: 318 case PM_EVENT_QUIESCE: 319 if (ops->freeze_noirq) { 320 error = ops->freeze_noirq(dev); 321 suspend_report_result(ops->freeze_noirq, error); 322 } 323 break; 324 case PM_EVENT_HIBERNATE: 325 if (ops->poweroff_noirq) { 326 error = ops->poweroff_noirq(dev); 327 suspend_report_result(ops->poweroff_noirq, error); 328 } 329 break; 330 case PM_EVENT_THAW: 331 case PM_EVENT_RECOVER: 332 if (ops->thaw_noirq) { 333 error = ops->thaw_noirq(dev); 334 suspend_report_result(ops->thaw_noirq, error); 335 } 336 break; 337 case PM_EVENT_RESTORE: 338 if (ops->restore_noirq) { 339 error = ops->restore_noirq(dev); 340 suspend_report_result(ops->restore_noirq, error); 341 } 342 break; 343#endif /* CONFIG_HIBERNATE_CALLBACKS */ 344 default: 345 error = -EINVAL; 346 } 347 348 if (initcall_debug) { 349 rettime = ktime_get(); 350 delta = ktime_sub(rettime, calltime); 351 printk("initcall %s_i+ returned %d after %Ld usecs\n", 352 dev_name(dev), error, 353 (unsigned long long)ktime_to_ns(delta) >> 10); 354 } 355 356 return error; 357} 358 359static char *pm_verb(int event) 360{ 361 switch (event) { 362 case PM_EVENT_SUSPEND: 363 return 
"suspend"; 364 case PM_EVENT_RESUME: 365 return "resume"; 366 case PM_EVENT_FREEZE: 367 return "freeze"; 368 case PM_EVENT_QUIESCE: 369 return "quiesce"; 370 case PM_EVENT_HIBERNATE: 371 return "hibernate"; 372 case PM_EVENT_THAW: 373 return "thaw"; 374 case PM_EVENT_RESTORE: 375 return "restore"; 376 case PM_EVENT_RECOVER: 377 return "recover"; 378 default: 379 return "(unknown PM event)"; 380 } 381} 382 383static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) 384{ 385 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), 386 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? 387 ", may wakeup" : ""); 388} 389 390static void pm_dev_err(struct device *dev, pm_message_t state, char *info, 391 int error) 392{ 393 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", 394 dev_name(dev), pm_verb(state.event), info, error); 395} 396 397static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) 398{ 399 ktime_t calltime; 400 u64 usecs64; 401 int usecs; 402 403 calltime = ktime_get(); 404 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); 405 do_div(usecs64, NSEC_PER_USEC); 406 usecs = usecs64; 407 if (usecs == 0) 408 usecs = 1; 409 pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", 410 info ?: "", info ? " " : "", pm_verb(state.event), 411 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 412} 413 414/*------------------------- Resume routines -------------------------*/ 415 416/** 417 * device_resume_noirq - Execute an "early resume" callback for given device. 418 * @dev: Device to handle. 419 * @state: PM transition of the system being carried out. 420 * 421 * The driver of @dev will not receive interrupts while this function is being 422 * executed. 
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Only the highest-priority layer providing callbacks is used. */
	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "EARLY power domain ");
		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		/* Hand the device on to the next (full resume) phase. */
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		/* Drop the list lock around the callback; see top of file. */
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error)
			pm_dev_err(dev, state, " early", error);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* A device must not resume before its parent has. */
	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	/* Re-enable runtime PM; balanced by pm_runtime_put_sync() below. */
	pm_runtime_enable(dev);
	put = true;

	/* Callback layers are tried in order: domain, type, class, bus. */
	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
	}

 End:
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	/* Let waiting children (and waiters in dpm_wait()) proceed. */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

/* async_schedule() callback: resume one device asynchronously. */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/* Whether @dev may be suspended/resumed asynchronously right now. */
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/*
	 * First pass: kick off async resumes so they run concurrently with
	 * the synchronous resumes performed below.
	 */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	/* Second pass: resume the remaining devices synchronously, in order. */
	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error)
				pm_dev_err(dev, state, "", error);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for any asynchronous resumes still in flight. */
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	/* Only the highest-priority layer providing callbacks is used. */
	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "completing power domain ");
		if (dev->pm_domain->ops.complete)
			dev->pm_domain->ops.complete(dev);
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "completing type ");
		if (dev->type->pm->complete)
			dev->type->pm->complete(dev);
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "completing class ");
		if (dev->class->pm->complete)
			dev->class->pm->complete(dev);
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "completing ");
		if (dev->bus->pm->complete)
			dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_prepared_list in reverse order of preparation. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		/* Park completed devices on a local list first. */
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	/* Put the completed devices back onto the main dpm_list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
711 * 712 * Execute "resume" callbacks for all devices and complete the PM transition of 713 * the system. 714 */ 715void dpm_resume_end(pm_message_t state) 716{ 717 dpm_resume(state); 718 dpm_complete(state); 719} 720EXPORT_SYMBOL_GPL(dpm_resume_end); 721 722 723/*------------------------- Suspend routines -------------------------*/ 724 725/** 726 * resume_event - Return a "resume" message for given "suspend" sleep state. 727 * @sleep_state: PM message representing a sleep state. 728 * 729 * Return a PM message representing the resume event corresponding to given 730 * sleep state. 731 */ 732static pm_message_t resume_event(pm_message_t sleep_state) 733{ 734 switch (sleep_state.event) { 735 case PM_EVENT_SUSPEND: 736 return PMSG_RESUME; 737 case PM_EVENT_FREEZE: 738 case PM_EVENT_QUIESCE: 739 return PMSG_RECOVER; 740 case PM_EVENT_HIBERNATE: 741 return PMSG_RESTORE; 742 } 743 return PMSG_ON; 744} 745 746/** 747 * device_suspend_noirq - Execute a "late suspend" callback for given device. 748 * @dev: Device to handle. 749 * @state: PM transition of the system being carried out. 750 * 751 * The driver of @dev will not receive interrupts while this function is being 752 * executed. 
753 */ 754static int device_suspend_noirq(struct device *dev, pm_message_t state) 755{ 756 int error; 757 758 if (dev->pm_domain) { 759 pm_dev_dbg(dev, state, "LATE power domain "); 760 error = pm_noirq_op(dev, &dev->pm_domain->ops, state); 761 if (error) 762 return error; 763 } else if (dev->type && dev->type->pm) { 764 pm_dev_dbg(dev, state, "LATE type "); 765 error = pm_noirq_op(dev, dev->type->pm, state); 766 if (error) 767 return error; 768 } else if (dev->class && dev->class->pm) { 769 pm_dev_dbg(dev, state, "LATE class "); 770 error = pm_noirq_op(dev, dev->class->pm, state); 771 if (error) 772 return error; 773 } else if (dev->bus && dev->bus->pm) { 774 pm_dev_dbg(dev, state, "LATE "); 775 error = pm_noirq_op(dev, dev->bus->pm, state); 776 if (error) 777 return error; 778 } 779 780 return 0; 781} 782 783/** 784 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. 785 * @state: PM transition of the system being carried out. 786 * 787 * Prevent device drivers from receiving interrupts and call the "noirq" suspend 788 * handlers for all non-sysdev devices. 
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	/* Walk the suspended list in reverse (children before parents). */
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		/* Drop the list lock around the callback; see top of file. */
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* On failure, undo the partial "late suspend" before returning. */
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	/* Children must suspend before their parent. */
	dpm_wait_for_children(dev, async);

	/* If another (async) suspend has already failed, bail out quietly. */
	if (async_error)
		return 0;

	/*
	 * Block runtime suspend and flush pending runtime PM requests; a
	 * resume request on a wakeup-capable device counts as a wakeup event.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	/* A pending wakeup event aborts the whole system suspend. */
	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		return 0;
	}

	device_lock(dev);

	/* Callback layers are tried in order: domain, type, class, bus. */
	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "power domain ");
		error = pm_op(dev, &dev->pm_domain->ops, state);
		goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "type ");
		error = pm_op(dev, dev->type->pm, state);
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
			goto End;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

 End:
	dev->power.is_suspended = !error;

	device_unlock(dev);
	/* Let the parent (waiting in dpm_wait_for_children()) proceed. */
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		/* Keep runtime PM disabled until device_resume(). */
		__pm_runtime_disable(dev, false);
	}

	return error;
}

/* async_schedule() callback: suspend one device asynchronously. */
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/*
 * device_suspend - Suspend one device, asynchronously if it is marked for
 * async suspend, otherwise synchronously via __device_suspend().
 */
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		/* Reference dropped by async_suspend() when it finishes. */
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	/* Walk the prepared list in reverse (children before parents). */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		/* Stop early if an async suspend has reported a failure. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for any asynchronous suspends still in flight. */
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	/* Only the highest-priority layer providing callbacks is used. */
	if (dev->pm_domain) {
		pm_dev_dbg(dev, state, "preparing power domain ");
		if (dev->pm_domain->ops.prepare)
			error = dev->pm_domain->ops.prepare(dev);
		suspend_report_result(dev->pm_domain->ops.prepare, error);
		if (error)
			goto End;
	} else if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "preparing type ");
		if (dev->type->pm->prepare)
			error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "preparing class ");
		if (dev->class->pm->prepare)
			error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
		if (error)
			goto End;
	} else if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "preparing ");
		if (dev->bus->pm->prepare)
			error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
	}

 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		/* Drop the list lock around the callback; see top of file. */
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				/* -EAGAIN means "skip this device, go on". */
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

/* Backend of the suspend_report_result() macro; logs a failing callback. */
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);