domain.c revision 40114447a7f89860b46a64e5504f313656cb5f27
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})
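
/*
 * For illustration only (a sketch, not compiled): with callback == stop,
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) behaves roughly like the
 * open-coded sequence below, preferring the domain-wide callback and
 * falling back to the per-device one:
 *
 *	int ret = 0;
 *
 *	if (genpd->dev_ops.stop)
 *		ret = genpd->dev_ops.stop(dev);
 *	else if (dev_gpd_data(dev)->ops.stop)
 *		ret = dev_gpd_data(dev)->ops.stop(dev);
 */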

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpu_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpu_data->saved_exit_latency;
	genpd->cpu_data->idle_state->exit_latency = usecs64;
}
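
/*
 * Worked example for genpd_recalc_cpu_exit_latency() (numbers are made up):
 * with power_on_latency_ns == 2500000 (2.5 ms), do_div() converts that to
 * 2500 us, and with saved_exit_latency == 100 us the cpuidle state's
 * exit_latency becomes 2600 us.  The domain's power-on latency is simply
 * stacked on top of the CPU's own exit latency.
 */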

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpu_data) {
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			genpd_recalc_cpu_exit_latency(genpd);
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
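
/*
 * Example (hypothetical platform code, not part of this file): setup code
 * that needs a domain powered up before touching its registers could do,
 * with "A3RV" standing in for a platform-specific domain name:
 *
 *	if (pm_genpd_name_poweron("A3RV"))
 *		pr_err("failed to power on A3RV\n");
 */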

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	bool need_restore = gpd_data->need_restore;

	gpd_data->need_restore = false;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpu_data) {
		/*
		 * If cpu_data is set, cpuidle should turn the domain off when
		 * the CPU in it is idle.  In that case we don't decrement the
		 * subdomain counts of the master domains, so that power is not
		 * removed from the current domain prematurely as a result of
		 * cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}
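
/*
 * Neither of the two callbacks above is called directly by drivers; the
 * runtime PM core invokes them through dev->pm_domain->ops.  A minimal
 * sketch of the (hypothetical) driver-side code that ends up here:
 *
 *	pm_runtime_enable(dev);
 *	ret = pm_runtime_get_sync(dev);	// leads to pm_genpd_runtime_resume()
 *	// ... use the hardware ...
 *	pm_runtime_put_sync(dev);	// may lead to pm_genpd_runtime_suspend()
 */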

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
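
/*
 * Example (hypothetical platform code): a platform would typically call the
 * function above once everything has had a chance to be probed, e.g.:
 *
 *	static int __init my_pd_poweroff_unused(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(my_pd_poweroff_unused);
 */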

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	if (genpd->power_on)
		genpd->power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
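
/*
 * The decision above reduces to the following table ("capable" is
 * device_can_wakeup(), "enabled" is device_may_wakeup()):
 *
 *	capable	enabled	active_wakeup	resume?
 *	no	-	-		no
 *	yes	yes	yes		yes
 *	yes	yes	no		no
 *	yes	no	yes		no
 *	yes	no	no		yes
 */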

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether the device's PM domain is to be powered off (true) or
 *	powered on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
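
/*
 * Example (hypothetical platform code): a timekeeping device marked as
 * "always on" might call this from its syscore hooks; my_timer_dev is an
 * assumed struct device pointer:
 *
 *	pm_genpd_syscore_switch(my_timer_dev, true);	// syscore suspend
 *	...
 *	pm_genpd_syscore_switch(my_timer_dev, false);	// syscore resume
 */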

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return NULL;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
	return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
				     struct generic_pm_domain_data *gpd_data)
{
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = &genpd->domain;
	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	mutex_unlock(&gpd_data->lock);

 out:
	genpd_release_lock(genpd);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing the PM domain to which
 *	the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
			     struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->of_node == genpd_node) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!genpd)
		return -EINVAL;

	return __pm_genpd_add_device(genpd, dev, td);
}

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}
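
/*
 * Example (hypothetical platform code): registering a platform device with a
 * domain, optionally passing measured latencies; all names and numbers below
 * are assumptions:
 *
 *	static struct gpd_timing_data my_td = {
 *		.stop_latency_ns	= 250,
 *		.start_latency_ns	= 250,
 *	};
 *
 *	__pm_genpd_add_device(&my_domain, &my_pdev->dev, &my_td);
 *
 * or, equivalently for the common case without timing data,
 * pm_genpd_add_device(&my_domain, &my_pdev->dev).
 */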

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	bool remove = false;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    || IS_ERR_OR_NULL(dev->pm_domain)
	    || pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	gpd_data = to_gpd_data(pdd);
	if (--gpd_data->refcount == 0) {
		dev->power.subsys_data->domain_data = NULL;
		remove = true;
	}

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
	struct pm_subsys_data *psd;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	psd = dev_to_psd(dev);
	if (psd && psd->domain_data)
		to_gpd_data(psd->domain_data)->need_restore = val;

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}
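
/*
 * Example (hypothetical platform code): making "A3RV" a subdomain of "A4R",
 * so that A4R cannot be powered off while A3RV is still on (both names are
 * assumptions):
 *
 *	pm_genpd_add_subdomain(&a4r_domain, &a3rv_domain);
 *
 * or, if both domains have already been registered, by name:
 *
 *	pm_genpd_add_subdomain_names("A4R", "A3RV");
 */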

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 *
 * Every call to this routine should be balanced with a call to
 * __pm_genpd_remove_callbacks() and they must not be nested.
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
			   struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	int ret = 0;

	if (!(dev && ops))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	pm_runtime_disable(dev);
	device_pm_lock();

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	gpd_data->ops = *ops;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

 out:
	device_pm_unlock();
	pm_runtime_enable(dev);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);

/**
 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
 * @dev: Device to remove the callbacks from.
 * @clear_td: If set, clear the device's timing data too.
 *
 * This routine can only be called after pm_genpd_add_callbacks().
 */
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
	struct generic_pm_domain_data *gpd_data = NULL;
	bool remove = false;
	int ret = 0;

	if (!(dev && dev->power.subsys_data))
		return -EINVAL;

	pm_runtime_disable(dev);
	device_pm_lock();

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
		gpd_data->ops = (struct gpd_dev_ops){ NULL };
		if (clear_td)
			gpd_data->td = (struct gpd_timing_data){ 0 };

		if (--gpd_data->refcount == 0) {
			dev->power.subsys_data->domain_data = NULL;
			remove = true;
		}
	} else {
		ret = -EINVAL;
	}

	spin_unlock_irq(&dev->power.lock);

	device_pm_unlock();
	pm_runtime_enable(dev);

	if (ret)
		return ret;

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpu_data) {
		ret = -EEXIST;
		goto out;
	}
	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
	if (!cpu_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_free;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpu_data->idle_state = idle_state;
	cpu_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpu_data = cpu_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();
 err_free:
	kfree(cpu_data);
	goto out;
}
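
/*
 * Example (hypothetical platform code): tying a CPU domain to state 1 of the
 * registered cpuidle driver; the state must currently be disabled (otherwise
 * -EAGAIN is returned) and becomes usable only while the domain is allowed
 * to go down:
 *
 *	ret = pm_genpd_attach_cpuidle(&my_cpu_domain, 1);
 */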

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpu_data = genpd->cpu_data;
	if (!cpu_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpu_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpu_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpu_data = NULL;
	kfree(cpu_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}

/* Default device callbacks for generic PM domains. */

/**
 * pm_genpd_default_save_state - Default "save device state" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_save_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	cb = dev_gpd_data(dev)->ops.save_state;
	if (cb)
		return cb(dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * pm_genpd_default_restore_state - Default PM domains "restore device state".
 * @dev: Device to handle.
 */
static int pm_genpd_default_restore_state(struct device *dev)
{
	int (*cb)(struct device *__dev);

	cb = dev_gpd_data(dev)->ops.restore_state;
	if (cb)
		return cb(dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;

	return cb ? cb(dev) : pm_generic_suspend(dev);
}

/**
 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_suspend_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;

	return cb ? cb(dev) : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;

	return cb ? cb(dev) : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_default_resume - Default "device resume" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_resume(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;

	return cb ? cb(dev) : pm_generic_resume(dev);
}

/**
 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;

	return cb ? cb(dev) : pm_generic_freeze(dev);
}

/**
 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_freeze_late(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;

	return cb ? cb(dev) : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw_early(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;

	return cb ? cb(dev) : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
 * @dev: Device to handle.
 */
static int pm_genpd_default_thaw(struct device *dev)
{
	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;

	return cb ? cb(dev) : pm_generic_thaw(dev);
}

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_default_suspend	NULL
#define pm_genpd_default_suspend_late	NULL
#define pm_genpd_default_resume_early	NULL
#define pm_genpd_default_resume		NULL
#define pm_genpd_default_freeze		NULL
#define pm_genpd_default_freeze_late	NULL
#define pm_genpd_default_thaw_early	NULL
#define pm_genpd_default_thaw		NULL

#endif /* !CONFIG_PM_SLEEP */

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's status field (true means powered off).
 */
void pm_genpd_init(struct generic_pm_domain *genpd,
		   struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	genpd->in_progress = 0;
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	init_waitqueue_head(&genpd->status_wait_queue);
	genpd->poweroff_task = NULL;
	genpd->resume_count = 0;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_genpd_suspend;
	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_genpd_resume_early;
	genpd->domain.ops.resume = pm_genpd_resume;
	genpd->domain.ops.freeze = pm_genpd_freeze;
	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
	genpd->domain.ops.thaw = pm_genpd_thaw;
	genpd->domain.ops.poweroff = pm_genpd_suspend;
	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_genpd_resume_early;
	genpd->domain.ops.restore = pm_genpd_resume;
	genpd->domain.ops.complete = pm_genpd_complete;
	genpd->dev_ops.save_state = pm_genpd_default_save_state;
	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
	genpd->dev_ops.suspend = pm_genpd_default_suspend;
	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
	genpd->dev_ops.resume = pm_genpd_default_resume;
	genpd->dev_ops.freeze = pm_genpd_default_freeze;
	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
	genpd->dev_ops.thaw = pm_genpd_default_thaw;
	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);
}
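
/*
 * Example (hypothetical platform code): bringing up a domain with the simple
 * QoS governor and putting a device in it; the names and callbacks below are
 * assumptions:
 *
 *	static struct generic_pm_domain my_domain = {
 *		.name		= "MYPD",
 *		.power_on	= my_pd_power_on,
 *		.power_off	= my_pd_power_off,
 *	};
 *
 *	pm_genpd_init(&my_domain, &simple_qos_governor, false);
 *	pm_genpd_add_device(&my_domain, &my_pdev->dev);
 */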