domain.c revision cd0ea672f58d5cfdea271c45cec0c897f2b792aa
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/sched.h>
17#include <linux/suspend.h>
18
19static LIST_HEAD(gpd_list);
20static DEFINE_MUTEX(gpd_list_lock);
21
22#ifdef CONFIG_PM
23
24static struct generic_pm_domain *dev_to_genpd(struct device *dev)
25{
26	if (IS_ERR_OR_NULL(dev->pm_domain))
27		return ERR_PTR(-EINVAL);
28
29	return pd_to_genpd(dev->pm_domain);
30}
31
32static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
33{
34	bool ret = false;
35
36	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
37		ret = !!atomic_dec_and_test(&genpd->sd_count);
38
39	return ret;
40}
41
42static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
43{
44	atomic_inc(&genpd->sd_count);
45	smp_mb__after_atomic_inc();
46}
47
48static void genpd_acquire_lock(struct generic_pm_domain *genpd)
49{
50	DEFINE_WAIT(wait);
51
52	mutex_lock(&genpd->lock);
53	/*
54	 * Wait for the domain to transition into either the active
55	 * or the power off state.
56	 */
57	for (;;) {
58		prepare_to_wait(&genpd->status_wait_queue, &wait,
59				TASK_UNINTERRUPTIBLE);
60		if (genpd->status == GPD_STATE_ACTIVE
61		    || genpd->status == GPD_STATE_POWER_OFF)
62			break;
63		mutex_unlock(&genpd->lock);
64
65		schedule();
66
67		mutex_lock(&genpd->lock);
68	}
69	finish_wait(&genpd->status_wait_queue, &wait);
70}
71
72static void genpd_release_lock(struct generic_pm_domain *genpd)
73{
74	mutex_unlock(&genpd->lock);
75}
76
77static void genpd_set_active(struct generic_pm_domain *genpd)
78{
79	if (genpd->resume_count == 0)
80		genpd->status = GPD_STATE_ACTIVE;
81}
82
83/**
84 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
85 * @genpd: PM domain to power up.
86 *
87 * Restore power to @genpd and all of its masters so that it is possible to
88 * resume a device belonging to it.
89 */
90int __pm_genpd_poweron(struct generic_pm_domain *genpd)
91	__releases(&genpd->lock) __acquires(&genpd->lock)
92{
93	struct gpd_link *link;
94	DEFINE_WAIT(wait);
95	int ret = 0;
96
97	/* If the domain's master is being waited for, we have to wait too. */
98	for (;;) {
99		prepare_to_wait(&genpd->status_wait_queue, &wait,
100				TASK_UNINTERRUPTIBLE);
101		if (genpd->status != GPD_STATE_WAIT_MASTER)
102			break;
103		mutex_unlock(&genpd->lock);
104
105		schedule();
106
107		mutex_lock(&genpd->lock);
108	}
109	finish_wait(&genpd->status_wait_queue, &wait);
110
111	if (genpd->status == GPD_STATE_ACTIVE
112	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
113		return 0;
114
115	if (genpd->status != GPD_STATE_POWER_OFF) {
116		genpd_set_active(genpd);
117		return 0;
118	}
119
120	/*
121	 * The list is guaranteed not to change while the loop below is being
122	 * executed, unless one of the masters' .power_on() callbacks fiddles
123	 * with it.
124	 */
125	list_for_each_entry(link, &genpd->slave_links, slave_node) {
126		genpd_sd_counter_inc(link->master);
127		genpd->status = GPD_STATE_WAIT_MASTER;
128
129		mutex_unlock(&genpd->lock);
130
131		ret = pm_genpd_poweron(link->master);
132
133		mutex_lock(&genpd->lock);
134
135		/*
136		 * The "wait for master" status is guaranteed not to change
137		 * while the master is powering on.
138		 */
139		genpd->status = GPD_STATE_POWER_OFF;
140		wake_up_all(&genpd->status_wait_queue);
141		if (ret) {
142			genpd_sd_counter_dec(link->master);
143			goto err;
144		}
145	}
146
147	if (genpd->power_on) {
148		ret = genpd->power_on(genpd);
149		if (ret)
150			goto err;
151	}
152
153	genpd_set_active(genpd);
154
155	return 0;
156
157 err:
158	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
159		genpd_sd_counter_dec(link->master);
160
161	return ret;
162}
163
164/**
165 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
166 * @genpd: PM domain to power up.
167 */
168int pm_genpd_poweron(struct generic_pm_domain *genpd)
169{
170	int ret;
171
172	mutex_lock(&genpd->lock);
173	ret = __pm_genpd_poweron(genpd);
174	mutex_unlock(&genpd->lock);
175	return ret;
176}
177
178#endif /* CONFIG_PM */
179
180#ifdef CONFIG_PM_RUNTIME
181
182/**
183 * __pm_genpd_save_device - Save the pre-suspend state of a device.
184 * @pdd: Domain data of the device to save the state of.
185 * @genpd: PM domain the device belongs to.
186 */
187static int __pm_genpd_save_device(struct pm_domain_data *pdd,
188				  struct generic_pm_domain *genpd)
189	__releases(&genpd->lock) __acquires(&genpd->lock)
190{
191	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
192	struct device *dev = pdd->dev;
193	struct device_driver *drv = dev->driver;
194	int ret = 0;
195
196	if (gpd_data->need_restore)
197		return 0;
198
199	mutex_unlock(&genpd->lock);
200
201	if (drv && drv->pm && drv->pm->runtime_suspend) {
202		if (genpd->start_device)
203			genpd->start_device(dev);
204
205		ret = drv->pm->runtime_suspend(dev);
206
207		if (genpd->stop_device)
208			genpd->stop_device(dev);
209	}
210
211	mutex_lock(&genpd->lock);
212
213	if (!ret)
214		gpd_data->need_restore = true;
215
216	return ret;
217}
218
219/**
220 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
221 * @pdd: Domain data of the device to restore the state of.
222 * @genpd: PM domain the device belongs to.
223 */
224static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
225				      struct generic_pm_domain *genpd)
226	__releases(&genpd->lock) __acquires(&genpd->lock)
227{
228	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
229	struct device *dev = pdd->dev;
230	struct device_driver *drv = dev->driver;
231
232	if (!gpd_data->need_restore)
233		return;
234
235	mutex_unlock(&genpd->lock);
236
237	if (drv && drv->pm && drv->pm->runtime_resume) {
238		if (genpd->start_device)
239			genpd->start_device(dev);
240
241		drv->pm->runtime_resume(dev);
242
243		if (genpd->stop_device)
244			genpd->stop_device(dev);
245	}
246
247	mutex_lock(&genpd->lock);
248
249	gpd_data->need_restore = false;
250}
251
252/**
253 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
254 * @genpd: PM domain to check.
255 *
256 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE or
257 * GPD_STATE_WAIT_MASTER during a "power off" operation, which means that a
258 * "power on" has occurred or started in the meantime, or if its resume_count
259 * field is different from zero, meaning that one of its devices has resumed.
260 */
261static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
262{
263	return genpd->status == GPD_STATE_WAIT_MASTER
264		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
265}
266
267/**
268 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
269 * @genpd: PM domain to power off.
270 *
271 * Queue up the execution of pm_genpd_poweroff() unless the work item has
272 * already been queued and is still pending.
273 */
274void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
275{
276	if (!work_pending(&genpd->power_off_work))
277		queue_work(pm_wq, &genpd->power_off_work);
278}
279
280/**
281 * pm_genpd_poweroff - Remove power from a given PM domain.
282 * @genpd: PM domain to power down.
283 *
284 * If all of the @genpd's devices have been suspended and all of its subdomains
285 * have been powered down, run the runtime suspend callbacks provided by all of
286 * the @genpd's devices' drivers and remove power from @genpd.
287 */
288static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
289	__releases(&genpd->lock) __acquires(&genpd->lock)
290{
291	struct pm_domain_data *pdd;
292	struct gpd_link *link;
293	unsigned int not_suspended;
294	int ret = 0;
295
296 start:
297	/*
298	 * Do not try to power off the domain in the following situations:
299	 * (1) The domain is already in the "power off" state.
300	 * (2) The domain is waiting for its master to power up.
301	 * (3) One of the domain's devices is being resumed right now.
302	 * (4) System suspend is in progress.
303	 */
304	if (genpd->status == GPD_STATE_POWER_OFF
305	    || genpd->status == GPD_STATE_WAIT_MASTER
306	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
307		return 0;
308
309	if (atomic_read(&genpd->sd_count) > 0)
310		return -EBUSY;
311
312	not_suspended = 0;
313	list_for_each_entry(pdd, &genpd->dev_list, list_node)
314		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
315		    || pdd->dev->power.irq_safe))
316			not_suspended++;
317
318	if (not_suspended > genpd->in_progress)
319		return -EBUSY;
320
321	if (genpd->poweroff_task) {
322		/*
323		 * Another instance of pm_genpd_poweroff() is executing
324		 * callbacks, so tell it to start over and return.
325		 */
326		genpd->status = GPD_STATE_REPEAT;
327		return 0;
328	}
329
330	if (genpd->gov && genpd->gov->power_down_ok) {
331		if (!genpd->gov->power_down_ok(&genpd->domain))
332			return -EAGAIN;
333	}
334
335	genpd->status = GPD_STATE_BUSY;
336	genpd->poweroff_task = current;
337
338	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
339		ret = atomic_read(&genpd->sd_count) == 0 ?
340			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
341
342		if (genpd_abort_poweroff(genpd))
343			goto out;
344
345		if (ret) {
346			genpd_set_active(genpd);
347			goto out;
348		}
349
350		if (genpd->status == GPD_STATE_REPEAT) {
351			genpd->poweroff_task = NULL;
352			goto start;
353		}
354	}
355
356	if (genpd->power_off) {
357		if (atomic_read(&genpd->sd_count) > 0) {
358			ret = -EBUSY;
359			goto out;
360		}
361
362		/*
363		 * If sd_count > 0 at this point, one of the subdomains hasn't
364		 * managed to call pm_genpd_poweron() for the master yet after
365		 * incrementing it.  In that case pm_genpd_poweron() will wait
366		 * for us to drop the lock, so we can call .power_off() and let
367		 * the pm_genpd_poweron() restore power for us (this shouldn't
368		 * happen very often).
369		 */
370		ret = genpd->power_off(genpd);
371		if (ret == -EBUSY) {
372			genpd_set_active(genpd);
373			goto out;
374		}
375	}
376
377	genpd->status = GPD_STATE_POWER_OFF;
378
379	list_for_each_entry(link, &genpd->slave_links, slave_node) {
380		genpd_sd_counter_dec(link->master);
381		genpd_queue_power_off_work(link->master);
382	}
383
384 out:
385	genpd->poweroff_task = NULL;
386	wake_up_all(&genpd->status_wait_queue);
387	return ret;
388}
389
390/**
391 * genpd_power_off_work_fn - Power off a PM domain whose subdomain count is 0.
392 * @work: Work structure used for scheduling the execution of this function.
393 */
394static void genpd_power_off_work_fn(struct work_struct *work)
395{
396	struct generic_pm_domain *genpd;
397
398	genpd = container_of(work, struct generic_pm_domain, power_off_work);
399
400	genpd_acquire_lock(genpd);
401	pm_genpd_poweroff(genpd);
402	genpd_release_lock(genpd);
403}
404
405/**
406 * pm_genpd_runtime_suspend - Suspend a device belonging to an I/O PM domain.
407 * @dev: Device to suspend.
408 *
409 * Carry out a runtime suspend of a device under the assumption that its
410 * pm_domain field points to the domain member of an object of type
411 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
412 */
413static int pm_genpd_runtime_suspend(struct device *dev)
414{
415	struct generic_pm_domain *genpd;
416
417	dev_dbg(dev, "%s()\n", __func__);
418
419	genpd = dev_to_genpd(dev);
420	if (IS_ERR(genpd))
421		return -EINVAL;
422
423	might_sleep_if(!genpd->dev_irq_safe);
424
425	if (genpd->stop_device) {
426		int ret = genpd->stop_device(dev);
427		if (ret)
428			return ret;
429	}
430
431	/*
432	 * If power.irq_safe is set, this routine will be run with interrupts
433	 * off, so it can't use mutexes.
434	 */
435	if (dev->power.irq_safe)
436		return 0;
437
438	mutex_lock(&genpd->lock);
439	genpd->in_progress++;
440	pm_genpd_poweroff(genpd);
441	genpd->in_progress--;
442	mutex_unlock(&genpd->lock);
443
444	return 0;
445}
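/*
 * The stop_device() and start_device() callbacks invoked above come from the
 * platform that set up the domain.  A minimal sketch (hypothetical names,
 * assuming <linux/clk.h> and a device whose only requirement is clock gating;
 * not part of this file):
 */
#if 0
static int example_stop_device(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Gate the device clock so the domain may be powered down later. */
	clk_disable(clk);
	clk_put(clk);
	return 0;
}

static int example_start_device(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Ungate the clock before the driver's callbacks touch the hardware. */
	ret = clk_enable(clk);
	clk_put(clk);
	return ret;
}
#endif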
446
447/**
448 * pm_genpd_runtime_resume - Resume a device belonging to an I/O PM domain.
449 * @dev: Device to resume.
450 *
451 * Carry out a runtime resume of a device under the assumption that its
452 * pm_domain field points to the domain member of an object of type
453 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
454 */
455static int pm_genpd_runtime_resume(struct device *dev)
456{
457	struct generic_pm_domain *genpd;
458	DEFINE_WAIT(wait);
459	int ret;
460
461	dev_dbg(dev, "%s()\n", __func__);
462
463	genpd = dev_to_genpd(dev);
464	if (IS_ERR(genpd))
465		return -EINVAL;
466
467	might_sleep_if(!genpd->dev_irq_safe);
468
469	/* If power.irq_safe is set, the PM domain is never powered off. */
470	if (dev->power.irq_safe)
471		goto out;
472
473	mutex_lock(&genpd->lock);
474	ret = __pm_genpd_poweron(genpd);
475	if (ret) {
476		mutex_unlock(&genpd->lock);
477		return ret;
478	}
479	genpd->status = GPD_STATE_BUSY;
480	genpd->resume_count++;
481	for (;;) {
482		prepare_to_wait(&genpd->status_wait_queue, &wait,
483				TASK_UNINTERRUPTIBLE);
484		/*
485		 * If current is the powering off task, we have been called
486		 * reentrantly from one of the device callbacks, so we should
487		 * not wait.
488		 */
489		if (!genpd->poweroff_task || genpd->poweroff_task == current)
490			break;
491		mutex_unlock(&genpd->lock);
492
493		schedule();
494
495		mutex_lock(&genpd->lock);
496	}
497	finish_wait(&genpd->status_wait_queue, &wait);
498	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
499	genpd->resume_count--;
500	genpd_set_active(genpd);
501	wake_up_all(&genpd->status_wait_queue);
502	mutex_unlock(&genpd->lock);
503
504 out:
505	if (genpd->start_device)
506		genpd->start_device(dev);
507
508	return 0;
509}
510
511/**
512 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
513 */
514void pm_genpd_poweroff_unused(void)
515{
516	struct generic_pm_domain *genpd;
517
518	mutex_lock(&gpd_list_lock);
519
520	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
521		genpd_queue_power_off_work(genpd);
522
523	mutex_unlock(&gpd_list_lock);
524}
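/*
 * pm_genpd_poweroff_unused() is intended to be called once by platform code
 * after all devices have been registered, so that domains left with no devices
 * in use get powered down.  A minimal sketch (hypothetical platform init code,
 * not part of this file):
 */
#if 0
static int __init example_power_off_unused_domains(void)
{
	/* Queues pm_genpd_poweroff() for every registered PM domain. */
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(example_power_off_unused_domains);
#endif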
525
526#else
527
528static inline void genpd_power_off_work_fn(struct work_struct *work) {}
529
530#define pm_genpd_runtime_suspend	NULL
531#define pm_genpd_runtime_resume		NULL
532
533#endif /* CONFIG_PM_RUNTIME */
534
535#ifdef CONFIG_PM_SLEEP
536
537/**
538 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
539 * @genpd: PM domain to power off, if possible.
540 *
541 * Check if the given PM domain can be powered off (during system suspend or
542 * hibernation) and do that if so.  Also, in that case propagate to its masters.
543 *
544 * This function is only called in "noirq" stages of system power transitions,
545 * so it need not acquire locks (all of the "noirq" callbacks are executed
546 * sequentially, so it is guaranteed that it will never run twice in parallel).
547 */
548static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
549{
550	struct gpd_link *link;
551
552	if (genpd->status == GPD_STATE_POWER_OFF)
553		return;
554
555	if (genpd->suspended_count != genpd->device_count
556	    || atomic_read(&genpd->sd_count) > 0)
557		return;
558
559	if (genpd->power_off)
560		genpd->power_off(genpd);
561
562	genpd->status = GPD_STATE_POWER_OFF;
563
564	list_for_each_entry(link, &genpd->slave_links, slave_node) {
565		genpd_sd_counter_dec(link->master);
566		pm_genpd_sync_poweroff(link->master);
567	}
568}
569
570/**
571 * resume_needed - Check whether to resume a device before system suspend.
572 * @dev: Device to check.
573 * @genpd: PM domain the device belongs to.
574 *
575 * There are two cases in which a device that can wake up the system from sleep
576 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
577 * to wake up the system and it has to remain active for this purpose while the
578 * system is in the sleep state and (2) if the device is not enabled to wake up
579 * the system from sleep states and it generally doesn't generate wakeup signals
580 * by itself (those signals are generated on its behalf by other parts of the
581 * system).  In the latter case it may be necessary to reconfigure the device's
582 * wakeup settings during system suspend, because it may have been set up to
583 * signal remote wakeup from the system's working state as needed by runtime PM.
584 * Return 'true' in either of the above cases.
585 */
586static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
587{
588	bool active_wakeup;
589
590	if (!device_can_wakeup(dev))
591		return false;
592
593	active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
594	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
595}
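/*
 * The active_wakeup() callback consulted above is optional and supplied by the
 * platform.  A minimal sketch (hypothetical; a real platform would typically
 * single out specific wakeup sources here):
 */
#if 0
static bool example_active_wakeup(struct device *dev)
{
	/*
	 * Treat every wakeup-capable device in this domain as one that must
	 * stay powered in order to signal wakeup from system sleep.
	 */
	return true;
}
#endif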
596
597/**
598 * pm_genpd_prepare - Start power transition of a device in a PM domain.
599 * @dev: Device to start the transition of.
600 *
601 * Start a power transition of a device (during a system-wide power transition)
602 * under the assumption that its pm_domain field points to the domain member of
603 * an object of type struct generic_pm_domain representing a PM domain
604 * consisting of I/O devices.
605 */
606static int pm_genpd_prepare(struct device *dev)
607{
608	struct generic_pm_domain *genpd;
609	int ret;
610
611	dev_dbg(dev, "%s()\n", __func__);
612
613	genpd = dev_to_genpd(dev);
614	if (IS_ERR(genpd))
615		return -EINVAL;
616
617	/*
618	 * If a wakeup request is pending for the device, it should be woken up
619	 * at this point and a system wakeup event should be reported if it's
620	 * set up to wake up the system from sleep states.
621	 */
622	pm_runtime_get_noresume(dev);
623	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
624		pm_wakeup_event(dev, 0);
625
626	if (pm_wakeup_pending()) {
627		pm_runtime_put_sync(dev);
628		return -EBUSY;
629	}
630
631	if (resume_needed(dev, genpd))
632		pm_runtime_resume(dev);
633
634	genpd_acquire_lock(genpd);
635
636	if (genpd->prepared_count++ == 0)
637		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
638
639	genpd_release_lock(genpd);
640
641	if (genpd->suspend_power_off) {
642		pm_runtime_put_noidle(dev);
643		return 0;
644	}
645
646	/*
647	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
648	 * so pm_genpd_poweron() will return immediately, but if the device
649	 * is suspended (e.g. it's been stopped by .stop_device()), we need
650	 * to make it operational.
651	 */
652	pm_runtime_resume(dev);
653	__pm_runtime_disable(dev, false);
654
655	ret = pm_generic_prepare(dev);
656	if (ret) {
657		mutex_lock(&genpd->lock);
658
659		if (--genpd->prepared_count == 0)
660			genpd->suspend_power_off = false;
661
662		mutex_unlock(&genpd->lock);
663		pm_runtime_enable(dev);
664	}
665
666	pm_runtime_put_sync(dev);
667	return ret;
668}
669
670/**
671 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
672 * @dev: Device to suspend.
673 *
674 * Suspend a device under the assumption that its pm_domain field points to the
675 * domain member of an object of type struct generic_pm_domain representing
676 * a PM domain consisting of I/O devices.
677 */
678static int pm_genpd_suspend(struct device *dev)
679{
680	struct generic_pm_domain *genpd;
681
682	dev_dbg(dev, "%s()\n", __func__);
683
684	genpd = dev_to_genpd(dev);
685	if (IS_ERR(genpd))
686		return -EINVAL;
687
688	return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
689}
690
691/**
692 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
693 * @dev: Device to suspend.
694 *
695 * Carry out a late suspend of a device under the assumption that its
696 * pm_domain field points to the domain member of an object of type
697 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
698 */
699static int pm_genpd_suspend_noirq(struct device *dev)
700{
701	struct generic_pm_domain *genpd;
702	int ret;
703
704	dev_dbg(dev, "%s()\n", __func__);
705
706	genpd = dev_to_genpd(dev);
707	if (IS_ERR(genpd))
708		return -EINVAL;
709
710	if (genpd->suspend_power_off)
711		return 0;
712
713	ret = pm_generic_suspend_noirq(dev);
714	if (ret)
715		return ret;
716
717	if (device_may_wakeup(dev)
718	    && genpd->active_wakeup && genpd->active_wakeup(dev))
719		return 0;
720
721	if (genpd->stop_device)
722		genpd->stop_device(dev);
723
724	/*
725	 * Since all of the "noirq" callbacks are executed sequentially, it is
726	 * guaranteed that this function will never run twice in parallel for
727	 * the same PM domain, so it is not necessary to use locking here.
728	 */
729	genpd->suspended_count++;
730	pm_genpd_sync_poweroff(genpd);
731
732	return 0;
733}
734
735/**
736 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
737 * @dev: Device to resume.
738 *
739 * Carry out an early resume of a device under the assumption that its
740 * pm_domain field points to the domain member of an object of type
741 * struct generic_pm_domain representing a power domain consisting of I/O
742 * devices.
743 */
744static int pm_genpd_resume_noirq(struct device *dev)
745{
746	struct generic_pm_domain *genpd;
747
748	dev_dbg(dev, "%s()\n", __func__);
749
750	genpd = dev_to_genpd(dev);
751	if (IS_ERR(genpd))
752		return -EINVAL;
753
754	if (genpd->suspend_power_off)
755		return 0;
756
757	/*
758	 * Since all of the "noirq" callbacks are executed sequentially, it is
759	 * guaranteed that this function will never run twice in parallel for
760	 * the same PM domain, so it is not necessary to use locking here.
761	 */
762	pm_genpd_poweron(genpd);
763	genpd->suspended_count--;
764	if (genpd->start_device)
765		genpd->start_device(dev);
766
767	return pm_generic_resume_noirq(dev);
768}
769
770/**
771 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
772 * @dev: Device to resume.
773 *
774 * Resume a device under the assumption that its pm_domain field points to the
775 * domain member of an object of type struct generic_pm_domain representing
776 * a power domain consisting of I/O devices.
777 */
778static int pm_genpd_resume(struct device *dev)
779{
780	struct generic_pm_domain *genpd;
781
782	dev_dbg(dev, "%s()\n", __func__);
783
784	genpd = dev_to_genpd(dev);
785	if (IS_ERR(genpd))
786		return -EINVAL;
787
788	return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
789}
790
791/**
792 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
793 * @dev: Device to freeze.
794 *
795 * Freeze a device under the assumption that its pm_domain field points to the
796 * domain member of an object of type struct generic_pm_domain representing
797 * a power domain consisting of I/O devices.
798 */
799static int pm_genpd_freeze(struct device *dev)
800{
801	struct generic_pm_domain *genpd;
802
803	dev_dbg(dev, "%s()\n", __func__);
804
805	genpd = dev_to_genpd(dev);
806	if (IS_ERR(genpd))
807		return -EINVAL;
808
809	return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
810}
811
812/**
813 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
814 * @dev: Device to freeze.
815 *
816 * Carry out a late freeze of a device under the assumption that its
817 * pm_domain field points to the domain member of an object of type
818 * struct generic_pm_domain representing a power domain consisting of I/O
819 * devices.
820 */
821static int pm_genpd_freeze_noirq(struct device *dev)
822{
823	struct generic_pm_domain *genpd;
824	int ret;
825
826	dev_dbg(dev, "%s()\n", __func__);
827
828	genpd = dev_to_genpd(dev);
829	if (IS_ERR(genpd))
830		return -EINVAL;
831
832	if (genpd->suspend_power_off)
833		return 0;
834
835	ret = pm_generic_freeze_noirq(dev);
836	if (ret)
837		return ret;
838
839	if (genpd->stop_device)
840		genpd->stop_device(dev);
841
842	return 0;
843}
844
845/**
846 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
847 * @dev: Device to thaw.
848 *
849 * Carry out an early thaw of a device under the assumption that its
850 * pm_domain field points to the domain member of an object of type
851 * struct generic_pm_domain representing a power domain consisting of I/O
852 * devices.
853 */
854static int pm_genpd_thaw_noirq(struct device *dev)
855{
856	struct generic_pm_domain *genpd;
857
858	dev_dbg(dev, "%s()\n", __func__);
859
860	genpd = dev_to_genpd(dev);
861	if (IS_ERR(genpd))
862		return -EINVAL;
863
864	if (genpd->suspend_power_off)
865		return 0;
866
867	if (genpd->start_device)
868		genpd->start_device(dev);
869
870	return pm_generic_thaw_noirq(dev);
871}
872
873/**
874 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
875 * @dev: Device to thaw.
876 *
877 * Thaw a device under the assumption that its pm_domain field points to the
878 * domain member of an object of type struct generic_pm_domain representing
879 * a power domain consisting of I/O devices.
880 */
881static int pm_genpd_thaw(struct device *dev)
882{
883	struct generic_pm_domain *genpd;
884
885	dev_dbg(dev, "%s()\n", __func__);
886
887	genpd = dev_to_genpd(dev);
888	if (IS_ERR(genpd))
889		return -EINVAL;
890
891	return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
892}
893
894/**
895 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
896 * @dev: Device to suspend.
897 *
898 * Power off a device under the assumption that its pm_domain field points to
899 * the domain member of an object of type struct generic_pm_domain representing
900 * a PM domain consisting of I/O devices.
901 */
902static int pm_genpd_dev_poweroff(struct device *dev)
903{
904	struct generic_pm_domain *genpd;
905
906	dev_dbg(dev, "%s()\n", __func__);
907
908	genpd = dev_to_genpd(dev);
909	if (IS_ERR(genpd))
910		return -EINVAL;
911
912	return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
913}
914
915/**
916 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
917 * @dev: Device to suspend.
918 *
919 * Carry out a late powering off of a device under the assumption that its
920 * pm_domain field points to the domain member of an object of type
921 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
922 */
923static int pm_genpd_dev_poweroff_noirq(struct device *dev)
924{
925	struct generic_pm_domain *genpd;
926	int ret;
927
928	dev_dbg(dev, "%s()\n", __func__);
929
930	genpd = dev_to_genpd(dev);
931	if (IS_ERR(genpd))
932		return -EINVAL;
933
934	if (genpd->suspend_power_off)
935		return 0;
936
937	ret = pm_generic_poweroff_noirq(dev);
938	if (ret)
939		return ret;
940
941	if (device_may_wakeup(dev)
942	    && genpd->active_wakeup && genpd->active_wakeup(dev))
943		return 0;
944
945	if (genpd->stop_device)
946		genpd->stop_device(dev);
947
948	/*
949	 * Since all of the "noirq" callbacks are executed sequentially, it is
950	 * guaranteed that this function will never run twice in parallel for
951	 * the same PM domain, so it is not necessary to use locking here.
952	 */
953	genpd->suspended_count++;
954	pm_genpd_sync_poweroff(genpd);
955
956	return 0;
957}
958
959/**
960 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
961 * @dev: Device to resume.
962 *
963 * Carry out an early restore of a device under the assumption that its
964 * pm_domain field points to the domain member of an object of type
965 * struct generic_pm_domain representing a power domain consisting of I/O
966 * devices.
967 */
968static int pm_genpd_restore_noirq(struct device *dev)
969{
970	struct generic_pm_domain *genpd;
971
972	dev_dbg(dev, "%s()\n", __func__);
973
974	genpd = dev_to_genpd(dev);
975	if (IS_ERR(genpd))
976		return -EINVAL;
977
978	/*
979	 * Since all of the "noirq" callbacks are executed sequentially, it is
980	 * guaranteed that this function will never run twice in parallel for
981	 * the same PM domain, so it is not necessary to use locking here.
982	 */
983	genpd->status = GPD_STATE_POWER_OFF;
984	if (genpd->suspend_power_off) {
985		/*
986		 * The boot kernel might put the domain into the power on state,
987		 * so make sure it really is powered off.
988		 */
989		if (genpd->power_off)
990			genpd->power_off(genpd);
991		return 0;
992	}
993
994	pm_genpd_poweron(genpd);
995	genpd->suspended_count--;
996	if (genpd->start_device)
997		genpd->start_device(dev);
998
999	return pm_generic_restore_noirq(dev);
1000}
1001
1002/**
1003 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
1004 * @dev: Device to resume.
1005 *
1006 * Restore a device under the assumption that its pm_domain field points to the
1007 * domain member of an object of type struct generic_pm_domain representing
1008 * a power domain consisting of I/O devices.
1009 */
1010static int pm_genpd_restore(struct device *dev)
1011{
1012	struct generic_pm_domain *genpd;
1013
1014	dev_dbg(dev, "%s()\n", __func__);
1015
1016	genpd = dev_to_genpd(dev);
1017	if (IS_ERR(genpd))
1018		return -EINVAL;
1019
1020	return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
1021}
1022
1023/**
1024 * pm_genpd_complete - Complete power transition of a device in a power domain.
1025 * @dev: Device to complete the transition of.
1026 *
1027 * Complete a power transition of a device (during a system-wide power
1028 * transition) under the assumption that its pm_domain field points to the
1029 * domain member of an object of type struct generic_pm_domain representing
1030 * a power domain consisting of I/O devices.
1031 */
1032static void pm_genpd_complete(struct device *dev)
1033{
1034	struct generic_pm_domain *genpd;
1035	bool run_complete;
1036
1037	dev_dbg(dev, "%s()\n", __func__);
1038
1039	genpd = dev_to_genpd(dev);
1040	if (IS_ERR(genpd))
1041		return;
1042
1043	mutex_lock(&genpd->lock);
1044
1045	run_complete = !genpd->suspend_power_off;
1046	if (--genpd->prepared_count == 0)
1047		genpd->suspend_power_off = false;
1048
1049	mutex_unlock(&genpd->lock);
1050
1051	if (run_complete) {
1052		pm_generic_complete(dev);
1053		pm_runtime_set_active(dev);
1054		pm_runtime_enable(dev);
1055		pm_runtime_idle(dev);
1056	}
1057}
1058
1059#else
1060
1061#define pm_genpd_prepare		NULL
1062#define pm_genpd_suspend		NULL
1063#define pm_genpd_suspend_noirq		NULL
1064#define pm_genpd_resume_noirq		NULL
1065#define pm_genpd_resume			NULL
1066#define pm_genpd_freeze			NULL
1067#define pm_genpd_freeze_noirq		NULL
1068#define pm_genpd_thaw_noirq		NULL
1069#define pm_genpd_thaw			NULL
1070#define pm_genpd_dev_poweroff_noirq	NULL
1071#define pm_genpd_dev_poweroff		NULL
1072#define pm_genpd_restore_noirq		NULL
1073#define pm_genpd_restore		NULL
1074#define pm_genpd_complete		NULL
1075
1076#endif /* CONFIG_PM_SLEEP */
1077
1078/**
1079 * pm_genpd_add_device - Add a device to an I/O PM domain.
1080 * @genpd: PM domain to add the device to.
1081 * @dev: Device to be added.
1082 */
1083int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1084{
1085	struct generic_pm_domain_data *gpd_data;
1086	struct pm_domain_data *pdd;
1087	int ret = 0;
1088
1089	dev_dbg(dev, "%s()\n", __func__);
1090
1091	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1092		return -EINVAL;
1093
1094	genpd_acquire_lock(genpd);
1095
1096	if (genpd->status == GPD_STATE_POWER_OFF) {
1097		ret = -EINVAL;
1098		goto out;
1099	}
1100
1101	if (genpd->prepared_count > 0) {
1102		ret = -EAGAIN;
1103		goto out;
1104	}
1105
1106	list_for_each_entry(pdd, &genpd->dev_list, list_node)
1107		if (pdd->dev == dev) {
1108			ret = -EINVAL;
1109			goto out;
1110		}
1111
1112	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1113	if (!gpd_data) {
1114		ret = -ENOMEM;
1115		goto out;
1116	}
1117
1118	genpd->device_count++;
1119
1120	dev->pm_domain = &genpd->domain;
1121	dev_pm_get_subsys_data(dev);
1122	dev->power.subsys_data->domain_data = &gpd_data->base;
1123	gpd_data->base.dev = dev;
1124	gpd_data->need_restore = false;
1125	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1126
1127 out:
1128	genpd_release_lock(genpd);
1129
1130	return ret;
1131}
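/*
 * Devices are normally attached to a domain by platform/board code once the
 * domain has been initialized and powered on.  A minimal sketch (hypothetical
 * names, assuming <linux/platform_device.h>; not part of this file):
 */
#if 0
static struct generic_pm_domain example_domain;	/* pm_genpd_init()'d elsewhere */

static void __init example_add_device(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_add_device(&example_domain, &pdev->dev);
	if (ret)
		dev_warn(&pdev->dev, "failed to add to PM domain: %d\n", ret);
}
#endif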
1132
1133/**
1134 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1135 * @genpd: PM domain to remove the device from.
1136 * @dev: Device to be removed.
1137 */
1138int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1139			   struct device *dev)
1140{
1141	struct pm_domain_data *pdd;
1142	int ret = -EINVAL;
1143
1144	dev_dbg(dev, "%s()\n", __func__);
1145
1146	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1147		return -EINVAL;
1148
1149	genpd_acquire_lock(genpd);
1150
1151	if (genpd->prepared_count > 0) {
1152		ret = -EAGAIN;
1153		goto out;
1154	}
1155
1156	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
1157		if (pdd->dev != dev)
1158			continue;
1159
1160		list_del_init(&pdd->list_node);
1161		pdd->dev = NULL;
1162		dev_pm_put_subsys_data(dev);
1163		dev->pm_domain = NULL;
1164		kfree(to_gpd_data(pdd));
1165
1166		genpd->device_count--;
1167
1168		ret = 0;
1169		break;
1170	}
1171
1172 out:
1173	genpd_release_lock(genpd);
1174
1175	return ret;
1176}
1177
1178/**
1179 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1180 * @genpd: Master PM domain to add the subdomain to.
1181 * @subdomain: Subdomain to be added.
1182 */
1183int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1184			   struct generic_pm_domain *subdomain)
1185{
1186	struct gpd_link *link;
1187	int ret = 0;
1188
1189	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1190		return -EINVAL;
1191
1192 start:
1193	genpd_acquire_lock(genpd);
1194	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1195
1196	if (subdomain->status != GPD_STATE_POWER_OFF
1197	    && subdomain->status != GPD_STATE_ACTIVE) {
1198		mutex_unlock(&subdomain->lock);
1199		genpd_release_lock(genpd);
1200		goto start;
1201	}
1202
1203	if (genpd->status == GPD_STATE_POWER_OFF
1204	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
1205		ret = -EINVAL;
1206		goto out;
1207	}
1208
1209	list_for_each_entry(link, &genpd->slave_links, slave_node) {
1210		if (link->slave == subdomain && link->master == genpd) {
1211			ret = -EINVAL;
1212			goto out;
1213		}
1214	}
1215
1216	link = kzalloc(sizeof(*link), GFP_KERNEL);
1217	if (!link) {
1218		ret = -ENOMEM;
1219		goto out;
1220	}
1221	link->master = genpd;
1222	list_add_tail(&link->master_node, &genpd->master_links);
1223	link->slave = subdomain;
1224	list_add_tail(&link->slave_node, &subdomain->slave_links);
1225	if (subdomain->status != GPD_STATE_POWER_OFF)
1226		genpd_sd_counter_inc(genpd);
1227
1228 out:
1229	mutex_unlock(&subdomain->lock);
1230	genpd_release_lock(genpd);
1231
1232	return ret;
1233}
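/*
 * Domain hierarchies are built by platform code with pm_genpd_add_subdomain().
 * A minimal sketch with two hypothetical domains, where example_a4 supplies
 * power to example_a3 (not part of this file):
 */
#if 0
static struct generic_pm_domain example_a4;	/* master */
static struct generic_pm_domain example_a3;	/* subdomain (slave) */

static void __init example_link_domains(void)
{
	/* example_a3 can only be powered up while example_a4 is powered on. */
	if (pm_genpd_add_subdomain(&example_a4, &example_a3))
		pr_warn("failed to make example_a3 a subdomain of example_a4\n");
}
#endif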
1234
1235/**
1236 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1237 * @genpd: Master PM domain to remove the subdomain from.
1238 * @subdomain: Subdomain to be removed.
1239 */
1240int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1241			      struct generic_pm_domain *subdomain)
1242{
1243	struct gpd_link *link;
1244	int ret = -EINVAL;
1245
1246	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1247		return -EINVAL;
1248
1249 start:
1250	genpd_acquire_lock(genpd);
1251
1252	list_for_each_entry(link, &genpd->master_links, master_node) {
1253		if (link->slave != subdomain)
1254			continue;
1255
1256		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1257
1258		if (subdomain->status != GPD_STATE_POWER_OFF
1259		    && subdomain->status != GPD_STATE_ACTIVE) {
1260			mutex_unlock(&subdomain->lock);
1261			genpd_release_lock(genpd);
1262			goto start;
1263		}
1264
1265		list_del(&link->master_node);
1266		list_del(&link->slave_node);
1267		kfree(link);
1268		if (subdomain->status != GPD_STATE_POWER_OFF)
1269			genpd_sd_counter_dec(genpd);
1270
1271		mutex_unlock(&subdomain->lock);
1272
1273		ret = 0;
1274		break;
1275	}
1276
1277	genpd_release_lock(genpd);
1278
1279	return ret;
1280}
1281
1282/**
1283 * pm_genpd_init - Initialize a generic I/O PM domain object.
1284 * @genpd: PM domain object to initialize.
1285 * @gov: PM domain governor to associate with the domain (may be NULL).
1286 * @is_off: Initial power state of the domain (true means powered off).
1287 */
1288void pm_genpd_init(struct generic_pm_domain *genpd,
1289		   struct dev_power_governor *gov, bool is_off)
1290{
1291	if (IS_ERR_OR_NULL(genpd))
1292		return;
1293
1294	INIT_LIST_HEAD(&genpd->master_links);
1295	INIT_LIST_HEAD(&genpd->slave_links);
1296	INIT_LIST_HEAD(&genpd->dev_list);
1297	mutex_init(&genpd->lock);
1298	genpd->gov = gov;
1299	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1300	genpd->in_progress = 0;
1301	atomic_set(&genpd->sd_count, 0);
1302	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1303	init_waitqueue_head(&genpd->status_wait_queue);
1304	genpd->poweroff_task = NULL;
1305	genpd->resume_count = 0;
1306	genpd->device_count = 0;
1307	genpd->suspended_count = 0;
1308	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1309	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1310	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1311	genpd->domain.ops.prepare = pm_genpd_prepare;
1312	genpd->domain.ops.suspend = pm_genpd_suspend;
1313	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1314	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1315	genpd->domain.ops.resume = pm_genpd_resume;
1316	genpd->domain.ops.freeze = pm_genpd_freeze;
1317	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1318	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1319	genpd->domain.ops.thaw = pm_genpd_thaw;
1320	genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
1321	genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
1322	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1323	genpd->domain.ops.restore = pm_genpd_restore;
1324	genpd->domain.ops.complete = pm_genpd_complete;
1325	mutex_lock(&gpd_list_lock);
1326	list_add(&genpd->gpd_list_node, &gpd_list);
1327	mutex_unlock(&gpd_list_lock);
1328}
1329
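/*
 * Putting it together: a platform describes each power domain with a struct
 * generic_pm_domain, fills in its power_on()/power_off() callbacks (plus the
 * optional start_device()/stop_device() and active_wakeup() ones) and then
 * registers it with pm_genpd_init().  A minimal sketch with a hypothetical
 * power-switch register (not part of this file):
 */
#if 0
#define EXAMPLE_PWR_REG		((void __iomem *)0)	/* placeholder address */

static int example_domain_power_off(struct generic_pm_domain *domain)
{
	writel(0, EXAMPLE_PWR_REG);	/* assumed: writing 0 removes power */
	return 0;
}

static int example_domain_power_on(struct generic_pm_domain *domain)
{
	writel(1, EXAMPLE_PWR_REG);	/* assumed: writing 1 restores power */
	return 0;
}

static struct generic_pm_domain example_domain = {
	.power_off = example_domain_power_off,
	.power_on = example_domain_power_on,
};

static void __init example_domain_setup(void)
{
	/* "false": the domain starts out powered on; no governor is used. */
	pm_genpd_init(&example_domain, NULL, false);
}
#endif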