domain.c revision b3d3b9fb6016e6eacd3ae49fb786806d00c43e7b
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	} else {						\
		__routine = dev_gpd_data(dev)->ops.callback;	\
		if (__routine)					\
			__ret = __routine(dev);			\
	}							\
	__ret;							\
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)	\
({										\
	ktime_t __start = ktime_get();						\
	type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);		\
	s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));		\
	struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;			\
	if (!__retval && __elapsed > __td->field) {				\
		__td->field = __elapsed;					\
		dev_warn(dev, name " latency exceeded, new value %lld ns\n",	\
			__elapsed);						\
		genpd->max_off_time_changed = true;				\
		__td->constraint_changed = true;				\
	}									\
	__retval;								\
})

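/*
 * Illustrative expansion (not compiled here): for a callback slot such as
 * "stop", GENPD_DEV_CALLBACK(genpd, int, stop, dev) behaves roughly like:
 *
 *	int ret = 0;
 *	if (genpd->dev_ops.stop)
 *		ret = genpd->dev_ops.stop(dev);
 *	else if (dev_gpd_data(dev)->ops.stop)
 *		ret = dev_gpd_data(dev)->ops.stop(dev);
 *
 * i.e. the domain-wide callback takes precedence over the per-device one.
 * GENPD_DEV_TIMED_CALLBACK() additionally times the call and, on success,
 * records a new worst-case latency in the device's struct gpd_timing_data
 * whenever the measured value exceeds the stored one.
 */
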
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(domain_name))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!strcmp(gpd->name, domain_name)) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);
	return genpd;
}

#ifdef CONFIG_PM

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
					stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
					start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
	DEFINE_WAIT(wait);

	mutex_lock(&genpd->lock);
	/*
	 * Wait for the domain to transition into either the active,
	 * or the power off state.
	 */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status == GPD_STATE_ACTIVE
		    || genpd->status == GPD_STATE_POWER_OFF)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
	if (genpd->resume_count == 0)
		genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
	s64 usecs64;

	if (!genpd->cpu_data)
		return;

	usecs64 = genpd->power_on_latency_ns;
	do_div(usecs64, NSEC_PER_USEC);
	usecs64 += genpd->cpu_data->saved_exit_latency;
	genpd->cpu_data->idle_state->exit_latency = usecs64;
}

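/*
 * Worked example with hypothetical numbers: if power_on_latency_ns is
 * 3000000 (3 ms) and saved_exit_latency is 500 us, the code above sets
 * the cpuidle state's exit_latency to 3000000 / NSEC_PER_USEC + 500 =
 * 3000 + 500 = 3500 us.
 */
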
/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct gpd_link *link;
	DEFINE_WAIT(wait);
	int ret = 0;

	/* If the domain's master is being waited for, we have to wait too. */
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		if (genpd->status != GPD_STATE_WAIT_MASTER)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);

	if (genpd->status == GPD_STATE_ACTIVE
	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
		return 0;

	if (genpd->status != GPD_STATE_POWER_OFF) {
		genpd_set_active(genpd);
		return 0;
	}

	if (genpd->cpu_data) {
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = true;
		cpuidle_resume_and_unlock();
		goto out;
	}

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);
		genpd->status = GPD_STATE_WAIT_MASTER;

		mutex_unlock(&genpd->lock);

		ret = pm_genpd_poweron(link->master);

		mutex_lock(&genpd->lock);

		/*
		 * The "wait for parent" status is guaranteed not to change
		 * while the master is powering on.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		wake_up_all(&genpd->status_wait_queue);
		if (ret) {
			genpd_sd_counter_dec(link->master);
			goto err;
		}
	}

	if (genpd->power_on) {
		ktime_t time_start = ktime_get();
		s64 elapsed_ns;

		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_on_latency_ns) {
			genpd->power_on_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			genpd_recalc_cpu_exit_latency(genpd);
			if (genpd->name)
				pr_warning("%s: Power-on latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

 out:
	genpd_set_active(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
		genpd_sd_counter_dec(link->master);

	return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	mutex_unlock(&genpd->lock);
	return ret;
}

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
	struct generic_pm_domain *genpd;

	genpd = pm_genpd_lookup_name(domain_name);
	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
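
/*
 * Usage sketch (illustrative; the domain name is hypothetical):
 *
 *	if (pm_genpd_name_poweron("my_domain"))
 *		pr_err("Failed to power on my_domain\n");
 *
 * pm_genpd_poweron() may be called directly when the caller already holds
 * a struct generic_pm_domain pointer.
 */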

#endif /* CONFIG_PM */

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
				     struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
					save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
					restore_state_latency_ns,
					"state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

	mutex_lock(&gpd_data->lock);
	dev = gpd_data->base.dev;
	if (!dev) {
		mutex_unlock(&gpd_data->lock);
		return NOTIFY_DONE;
	}
	mutex_unlock(&gpd_data->lock);

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
				  struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	int ret = 0;

	if (gpd_data->need_restore)
		return 0;

	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	ret = genpd_save_dev(genpd, dev);
	genpd_stop_dev(genpd, dev);

	mutex_lock(&genpd->lock);

	if (!ret)
		gpd_data->need_restore = true;

	return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
				      struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
	struct device *dev = pdd->dev;
	bool need_restore = gpd_data->need_restore;

	gpd_data->need_restore = false;
	mutex_unlock(&genpd->lock);

	genpd_start_dev(genpd, dev);
	if (need_restore)
		genpd_restore_dev(genpd, dev);

	mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
	return genpd->status == GPD_STATE_WAIT_MASTER
		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
 * before.
 */
void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	if (!work_pending(&genpd->power_off_work))
		queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
	__releases(&genpd->lock) __acquires(&genpd->lock)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended;
	int ret = 0;

 start:
	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) The domain is waiting for its master to power up.
	 * (3) One of the domain's devices is being resumed right now.
	 * (4) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->status == GPD_STATE_WAIT_MASTER
	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

	if (genpd->poweroff_task) {
		/*
		 * Another instance of pm_genpd_poweroff() is executing
		 * callbacks, so tell it to start over and return.
		 */
		genpd->status = GPD_STATE_REPEAT;
		return 0;
	}

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	genpd->status = GPD_STATE_BUSY;
	genpd->poweroff_task = current;

	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
		ret = atomic_read(&genpd->sd_count) == 0 ?
			__pm_genpd_save_device(pdd, genpd) : -EBUSY;

		if (genpd_abort_poweroff(genpd))
			goto out;

		if (ret) {
			genpd_set_active(genpd);
			goto out;
		}

		if (genpd->status == GPD_STATE_REPEAT) {
			genpd->poweroff_task = NULL;
			goto start;
		}
	}

	if (genpd->cpu_data) {
		/*
		 * If cpu_data is set, cpuidle should turn the domain off when
		 * the CPU in it is idle.  In that case we don't decrement the
		 * subdomain counts of the master domains, so that power is not
		 * removed from the current domain prematurely as a result of
		 * cutting off the masters' power.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		cpuidle_pause_and_lock();
		genpd->cpu_data->idle_state->disabled = false;
		cpuidle_resume_and_unlock();
		goto out;
	}

	if (genpd->power_off) {
		ktime_t time_start;
		s64 elapsed_ns;

		if (atomic_read(&genpd->sd_count) > 0) {
			ret = -EBUSY;
			goto out;
		}

		time_start = ktime_get();

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call pm_genpd_poweron() for the master yet after
		 * incrementing it.  In that case pm_genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the pm_genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd->power_off(genpd);
		if (ret == -EBUSY) {
			genpd_set_active(genpd);
			goto out;
		}

		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > genpd->power_off_latency_ns) {
			genpd->power_off_latency_ns = elapsed_ns;
			genpd->max_off_time_changed = true;
			if (genpd->name)
				pr_warning("%s: Power-off latency exceeded, "
					"new value %lld ns\n", genpd->name,
					elapsed_ns);
		}
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

 out:
	genpd->poweroff_task = NULL;
	wake_up_all(&genpd->status_wait_queue);
	return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_acquire_lock(genpd);
	pm_genpd_poweroff(genpd);
	genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
	if (stop_ok && !stop_ok(dev))
		return -EBUSY;

	ret = genpd_stop_dev(genpd, dev);
	if (ret)
		return ret;

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd->in_progress++;
	pm_genpd_poweroff(genpd);
	genpd->in_progress--;
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	DEFINE_WAIT(wait);
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	might_sleep_if(!genpd->dev_irq_safe);

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe)
		return genpd_start_dev_no_timing(genpd, dev);

	mutex_lock(&genpd->lock);
	ret = __pm_genpd_poweron(genpd);
	if (ret) {
		mutex_unlock(&genpd->lock);
		return ret;
	}
	genpd->status = GPD_STATE_BUSY;
	genpd->resume_count++;
	for (;;) {
		prepare_to_wait(&genpd->status_wait_queue, &wait,
				TASK_UNINTERRUPTIBLE);
		/*
		 * If current is the powering off task, we have been called
		 * reentrantly from one of the device callbacks, so we should
		 * not wait.
		 */
		if (!genpd->poweroff_task || genpd->poweroff_task == current)
			break;
		mutex_unlock(&genpd->lock);

		schedule();

		mutex_lock(&genpd->lock);
	}
	finish_wait(&genpd->status_wait_queue, &wait);
	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
	genpd->resume_count--;
	genpd_set_active(genpd);
	wake_up_all(&genpd->status_wait_queue);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);
}
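
/*
 * Platform code typically calls pm_genpd_poweroff_unused() once, after all
 * drivers have had a chance to probe, e.g. (illustrative sketch with a
 * hypothetical function name):
 *
 *	static int __init my_platform_pd_late_init(void)
 *	{
 *		pm_genpd_poweroff_unused();
 *		return 0;
 *	}
 *	late_initcall(my_platform_pd_late_init);
 */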

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
					    unsigned long val, void *ptr)
{
	return NOTIFY_DONE;
}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend	NULL
#define pm_genpd_runtime_resume		NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(struct generic_pm_domain *genpd)
{
	struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
}

static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
}

static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
}

static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
}

static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
}

static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
}

static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
}

static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	if (genpd->power_off)
		genpd->power_off(genpd);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status != GPD_STATE_POWER_OFF)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	if (genpd->power_on)
		genpd->power_on(genpd);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		return -EBUSY;
	}

	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count++ == 0) {
		genpd->suspended_count = 0;
		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
	}

	genpd_release_lock(genpd);

	if (genpd->suspend_power_off) {
		pm_runtime_put_noidle(dev);
		return 0;
	}

	/*
	 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
	 * so pm_genpd_poweron() will return immediately, but if the device
	 * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
	 * to make it operational.
	 */
	pm_runtime_resume(dev);
	__pm_runtime_disable(dev, false);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		if (--genpd->prepared_count == 0)
			genpd->suspend_power_off = false;

		mutex_unlock(&genpd->lock);
		pm_runtime_enable(dev);
	}

	pm_runtime_put_sync(dev);
	return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	genpd_stop_dev(genpd, dev);

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->suspend_power_off
	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;
		if (genpd->suspend_power_off) {
			/*
			 * If the domain was off before the hibernation, make
			 * sure it will be off going forward.
			 */
			if (genpd->power_off)
				genpd->power_off(genpd);

			return 0;
		}
	}

	if (genpd->suspend_power_off)
		return 0;

	pm_genpd_sync_poweron(genpd);

	return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool run_complete;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	mutex_lock(&genpd->lock);

	run_complete = !genpd->suspend_power_off;
	if (--genpd->prepared_count == 0)
		genpd->suspend_power_off = false;

	mutex_unlock(&genpd->lock);

	if (run_complete) {
		pm_generic_complete(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
		pm_runtime_idle(dev);
	}
}

/**
 * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to switch the domain's power off (true) or on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
void pm_genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd);
	} else {
		pm_genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
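
/*
 * Usage sketch (illustrative, all names hypothetical): a timekeeping
 * device that must keep working across syscore suspend can switch its
 * domain from its syscore callbacks, where my_timer_dev is a
 * struct device * saved at probe time:
 *
 *	static void my_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_switch(my_timer_dev, true);
 *	}
 *
 *	static void my_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_switch(my_timer_dev, false);
 *	}
 */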

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend		NULL
#define pm_genpd_suspend_late		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_early		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_resume			NULL
#define pm_genpd_freeze			NULL
#define pm_genpd_freeze_late		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_early		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_thaw			NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data)
		return NULL;

	mutex_init(&gpd_data->lock);
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
	return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
				     struct generic_pm_domain_data *gpd_data)
{
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
	kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node)
		if (pdd->dev == dev) {
			ret = -EINVAL;
			goto out;
		}

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = &genpd->domain;
	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	gpd_data->base.dev = dev;
	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	mutex_unlock(&gpd_data->lock);

 out:
	genpd_release_lock(genpd);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
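
/*
 * Usage sketch (illustrative): callers that need no explicit timing data
 * normally use the pm_genpd_add_device() wrapper from <linux/pm_domain.h>,
 * where my_domain is a hypothetical, already-initialized domain:
 *
 *	ret = pm_genpd_add_device(&my_domain, &pdev->dev);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to join PM domain: %d\n", ret);
 */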

/**
 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
 * @genpd_node: Device tree node pointer representing the PM domain to which
 *   the device is added.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
			     struct gpd_timing_data *td)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->of_node == genpd_node) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	if (!genpd)
		return -EINVAL;

	return __pm_genpd_add_device(genpd, dev, td);
}

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
			       struct gpd_timing_data *td)
{
	return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	bool remove = false;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
	    ||  IS_ERR_OR_NULL(dev->pm_domain)
	    ||  pd_to_genpd(dev->pm_domain) != genpd)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	spin_lock_irq(&dev->power.lock);

	dev->pm_domain = NULL;
	pdd = dev->power.subsys_data->domain_data;
	list_del_init(&pdd->list_node);
	gpd_data = to_gpd_data(pdd);
	if (--gpd_data->refcount == 0) {
		dev->power.subsys_data->domain_data = NULL;
		remove = true;
	}

	spin_unlock_irq(&dev->power.lock);

	mutex_lock(&gpd_data->lock);
	pdd->dev = NULL;
	mutex_unlock(&gpd_data->lock);

	genpd_release_lock(genpd);

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
	struct pm_subsys_data *psd;
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	psd = dev_to_psd(dev);
	if (psd && psd->domain_data)
		to_gpd_data(psd->domain_data)->need_restore = val;

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
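
/*
 * Usage sketch (illustrative): a driver that knows its device lost no
 * context while the domain was powered off can skip the next restore:
 *
 *	pm_genpd_dev_need_restore(dev, false);
 */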

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);
	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

	if (subdomain->status != GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_ACTIVE) {
		mutex_unlock(&subdomain->lock);
		genpd_release_lock(genpd);
		goto start;
	}

	if (genpd->status == GPD_STATE_POWER_OFF
	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave == subdomain && link->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto out;
	}
	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&subdomain->lock);
	genpd_release_lock(genpd);

	return ret;
}
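
/*
 * Usage sketch (illustrative, domain names hypothetical): nesting a
 * "child" domain inside a "parent" domain, so that powering off "parent"
 * is deferred until "child" has been powered off as well:
 *
 *	ret = pm_genpd_add_subdomain(&parent_domain, &child_domain);
 *	if (ret)
 *		pr_err("Failed to add child subdomain: %d\n", ret);
 */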

/**
 * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
 * @master_name: Name of the master PM domain to add the subdomain to.
 * @subdomain_name: Name of the subdomain to be added.
 */
int pm_genpd_add_subdomain_names(const char *master_name,
				 const char *subdomain_name)
{
	struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;

	if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
		return -EINVAL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (!master && !strcmp(gpd->name, master_name))
			master = gpd;

		if (!subdomain && !strcmp(gpd->name, subdomain_name))
			subdomain = gpd;

		if (master && subdomain)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return pm_genpd_add_subdomain(master, subdomain);
}

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

 start:
	genpd_acquire_lock(genpd);

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

		if (subdomain->status != GPD_STATE_POWER_OFF
		    && subdomain->status != GPD_STATE_ACTIVE) {
			mutex_unlock(&subdomain->lock);
			genpd_release_lock(genpd);
			goto start;
		}

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		mutex_unlock(&subdomain->lock);

		ret = 0;
		break;
	}

	genpd_release_lock(genpd);

	return ret;
}

/**
 * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
 * @dev: Device to add the callbacks to.
 * @ops: Set of callbacks to add.
 * @td: Timing data to add to the device along with the callbacks (optional).
 *
 * Every call to this routine should be balanced with a call to
 * __pm_genpd_remove_callbacks() and they must not be nested.
 */
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
			   struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
	int ret = 0;

	if (!(dev && ops))
		return -EINVAL;

	gpd_data_new = __pm_genpd_alloc_dev_data(dev);
	if (!gpd_data_new)
		return -ENOMEM;

	pm_runtime_disable(dev);
	device_pm_lock();

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		goto out;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	} else {
		gpd_data = gpd_data_new;
		dev->power.subsys_data->domain_data = &gpd_data->base;
	}
	gpd_data->refcount++;
	gpd_data->ops = *ops;
	if (td)
		gpd_data->td = *td;

	spin_unlock_irq(&dev->power.lock);

 out:
	device_pm_unlock();
	pm_runtime_enable(dev);

	if (gpd_data != gpd_data_new)
		__pm_genpd_free_dev_data(dev, gpd_data_new);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
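
/*
 * Usage sketch (illustrative, all names hypothetical): attaching
 * device-specific save/restore callbacks:
 *
 *	static struct gpd_dev_ops my_dev_ops = {
 *		.save_state = my_save_state,
 *		.restore_state = my_restore_state,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(dev, &my_dev_ops, NULL);
 *
 * Passing NULL timing data leaves the device's existing
 * struct gpd_timing_data untouched.
 */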

/**
 * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
 * @dev: Device to remove the callbacks from.
 * @clear_td: If set, clear the device's timing data too.
 *
 * This routine can only be called after pm_genpd_add_callbacks().
 */
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
	struct generic_pm_domain_data *gpd_data = NULL;
	bool remove = false;
	int ret = 0;

	if (!(dev && dev->power.subsys_data))
		return -EINVAL;

	pm_runtime_disable(dev);
	device_pm_lock();

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
		gpd_data->ops = (struct gpd_dev_ops){ NULL };
		if (clear_td)
			gpd_data->td = (struct gpd_timing_data){ 0 };

		if (--gpd_data->refcount == 0) {
			dev->power.subsys_data->domain_data = NULL;
			remove = true;
		}
	} else {
		ret = -EINVAL;
	}

	spin_unlock_irq(&dev->power.lock);

	device_pm_unlock();
	pm_runtime_enable(dev);

	if (ret)
		return ret;

	dev_pm_put_subsys_data(dev);
	if (remove)
		__pm_genpd_free_dev_data(dev, gpd_data);

	return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);

/**
 * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
 * @genpd: PM domain to be connected with cpuidle.
 * @state: cpuidle state this domain can disable/enable.
 *
 * Make a PM domain behave as though it contained a CPU core, that is, instead
 * of calling its power down routine it will enable the given cpuidle state so
 * that the cpuidle subsystem can power it down (if possible and desirable).
 */
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
	struct cpuidle_driver *cpuidle_drv;
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || state < 0)
		return -EINVAL;

	genpd_acquire_lock(genpd);

	if (genpd->cpu_data) {
		ret = -EEXIST;
		goto out;
	}
	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
	if (!cpu_data) {
		ret = -ENOMEM;
		goto out;
	}
	cpuidle_drv = cpuidle_driver_ref();
	if (!cpuidle_drv) {
		ret = -ENODEV;
		goto err_free;
	}
	if (cpuidle_drv->state_count <= state) {
		ret = -EINVAL;
		goto err;
	}
	idle_state = &cpuidle_drv->states[state];
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto err;
	}
	cpu_data->idle_state = idle_state;
	cpu_data->saved_exit_latency = idle_state->exit_latency;
	genpd->cpu_data = cpu_data;
	genpd_recalc_cpu_exit_latency(genpd);

 out:
	genpd_release_lock(genpd);
	return ret;

 err:
	cpuidle_driver_unref();

 err_free:
	/* Free cpu_data so that it is not leaked on the error paths above. */
	kfree(cpu_data);
	goto out;
}

/**
 * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
 * @name: Name of the domain to connect to cpuidle.
 * @state: cpuidle state this domain can manipulate.
 */
int pm_genpd_name_attach_cpuidle(const char *name, int state)
{
	return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
}
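
/*
 * Usage sketch (illustrative, domain name hypothetical): tying domain
 * "my_cpu_domain" to cpuidle state 1, which must have been registered in
 * the disabled state:
 *
 *	ret = pm_genpd_name_attach_cpuidle("my_cpu_domain", 1);
 *	if (ret)
 *		pr_err("Failed to attach my_cpu_domain to cpuidle: %d\n", ret);
 */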

/**
 * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
 * @genpd: PM domain to remove the cpuidle connection from.
 *
 * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
 * given PM domain.
 */
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
	struct gpd_cpu_data *cpu_data;
	struct cpuidle_state *idle_state;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_acquire_lock(genpd);

	cpu_data = genpd->cpu_data;
	if (!cpu_data) {
		ret = -ENODEV;
		goto out;
	}
	idle_state = cpu_data->idle_state;
	if (!idle_state->disabled) {
		ret = -EAGAIN;
		goto out;
	}
	idle_state->exit_latency = cpu_data->saved_exit_latency;
	cpuidle_driver_unref();
	genpd->cpu_data = NULL;
	kfree(cpu_data);

 out:
	genpd_release_lock(genpd);
	return ret;
}

/**
 * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
 * @name: Name of the domain to disconnect cpuidle from.
 */
int pm_genpd_name_detach_cpuidle(const char *name)
{
	return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
}

1947/* Default device callbacks for generic PM domains. */
1948
1949/**
1950 * pm_genpd_default_save_state - Default "save device state" for PM domians.
1951 * @dev: Device to handle.
1952 */
1953static int pm_genpd_default_save_state(struct device *dev)
1954{
1955	int (*cb)(struct device *__dev);
1956
1957	cb = dev_gpd_data(dev)->ops.save_state;
1958	if (cb)
1959		return cb(dev);
1960
1961	if (dev->type && dev->type->pm)
1962		cb = dev->type->pm->runtime_suspend;
1963	else if (dev->class && dev->class->pm)
1964		cb = dev->class->pm->runtime_suspend;
1965	else if (dev->bus && dev->bus->pm)
1966		cb = dev->bus->pm->runtime_suspend;
1967	else
1968		cb = NULL;
1969
1970	if (!cb && dev->driver && dev->driver->pm)
1971		cb = dev->driver->pm->runtime_suspend;
1972
1973	return cb ? cb(dev) : 0;
1974}
1975
1976/**
1977 * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
1978 * @dev: Device to handle.
1979 */
1980static int pm_genpd_default_restore_state(struct device *dev)
1981{
1982	int (*cb)(struct device *__dev);
1983
1984	cb = dev_gpd_data(dev)->ops.restore_state;
1985	if (cb)
1986		return cb(dev);
1987
1988	if (dev->type && dev->type->pm)
1989		cb = dev->type->pm->runtime_resume;
1990	else if (dev->class && dev->class->pm)
1991		cb = dev->class->pm->runtime_resume;
1992	else if (dev->bus && dev->bus->pm)
1993		cb = dev->bus->pm->runtime_resume;
1994	else
1995		cb = NULL;
1996
1997	if (!cb && dev->driver && dev->driver->pm)
1998		cb = dev->driver->pm->runtime_resume;
1999
2000	return cb ? cb(dev) : 0;
2001}
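/*
 * Both helpers above resolve the callback the same way: a per-device
 * gpd_dev_ops callback if one is set, otherwise the runtime PM callback
 * of the first subsystem-level pm_ops present (device type, then class,
 * then bus), and finally the driver's own runtime PM callback.  If none
 * is found, the device is assumed to need no state save/restore work and
 * 0 is returned.
 *
 * Example (illustrative sketch): installing per-device callbacks, assuming
 * the pm_genpd_add_callbacks() helper available in kernels of this vintage
 * and hypothetical my_dev_save()/my_dev_restore() routines:
 *
 *	static struct gpd_dev_ops my_dev_ops = {
 *		.save_state = my_dev_save,
 *		.restore_state = my_dev_restore,
 *	};
 *
 *	ret = pm_genpd_add_callbacks(dev, &my_dev_ops, NULL);
 */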
2002
2003#ifdef CONFIG_PM_SLEEP
2004
2005/**
2006 * pm_genpd_default_suspend - Default "device suspend" for PM domains.
2007 * @dev: Device to handle.
2008 */
2009static int pm_genpd_default_suspend(struct device *dev)
2010{
2011	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
2012
2013	return cb ? cb(dev) : pm_generic_suspend(dev);
2014}
2015
2016/**
2017 * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
2018 * @dev: Device to handle.
2019 */
2020static int pm_genpd_default_suspend_late(struct device *dev)
2021{
2022	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
2023
2024	return cb ? cb(dev) : pm_generic_suspend_late(dev);
2025}
2026
2027/**
2028 * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
2029 * @dev: Device to handle.
2030 */
2031static int pm_genpd_default_resume_early(struct device *dev)
2032{
2033	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
2034
2035	return cb ? cb(dev) : pm_generic_resume_early(dev);
2036}
2037
2038/**
2039 * pm_genpd_default_resume - Default "device resume" for PM domains.
2040 * @dev: Device to handle.
2041 */
2042static int pm_genpd_default_resume(struct device *dev)
2043{
2044	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
2045
2046	return cb ? cb(dev) : pm_generic_resume(dev);
2047}
2048
2049/**
2050 * pm_genpd_default_freeze - Default "device freeze" for PM domains.
2051 * @dev: Device to handle.
2052 */
2053static int pm_genpd_default_freeze(struct device *dev)
2054{
2055	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
2056
2057	return cb ? cb(dev) : pm_generic_freeze(dev);
2058}
2059
2060/**
2061 * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
2062 * @dev: Device to handle.
2063 */
2064static int pm_genpd_default_freeze_late(struct device *dev)
2065{
2066	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
2067
2068	return cb ? cb(dev) : pm_generic_freeze_late(dev);
2069}
2070
2071/**
2072 * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
2073 * @dev: Device to handle.
2074 */
2075static int pm_genpd_default_thaw_early(struct device *dev)
2076{
2077	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
2078
2079	return cb ? cb(dev) : pm_generic_thaw_early(dev);
2080}
2081
2082/**
2083 * pm_genpd_default_thaw - Default "device thaw" for PM domains.
2084 * @dev: Device to handle.
2085 */
2086static int pm_genpd_default_thaw(struct device *dev)
2087{
2088	int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
2089
2090	return cb ? cb(dev) : pm_generic_thaw(dev);
2091}
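/*
 * Unlike the save/restore helpers above, the sleep-state defaults do not
 * walk the type/class/bus hierarchy themselves: each one runs the
 * per-device gpd_dev_ops callback if set and otherwise falls back to the
 * matching pm_generic_*() helper, which in turn invokes the driver's own
 * callback.  Example (illustrative sketch, with a hypothetical
 * my_dev_suspend()):
 *
 *	static struct gpd_dev_ops my_sleep_ops = {
 *		.suspend = my_dev_suspend,
 *	};
 */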
2092
2093#else /* !CONFIG_PM_SLEEP */
2094
2095#define pm_genpd_default_suspend	NULL
2096#define pm_genpd_default_suspend_late	NULL
2097#define pm_genpd_default_resume_early	NULL
2098#define pm_genpd_default_resume		NULL
2099#define pm_genpd_default_freeze		NULL
2100#define pm_genpd_default_freeze_late	NULL
2101#define pm_genpd_default_thaw_early	NULL
2102#define pm_genpd_default_thaw		NULL
2103
2104#endif /* !CONFIG_PM_SLEEP */
2105
2106/**
2107 * pm_genpd_init - Initialize a generic I/O PM domain object.
2108 * @genpd: PM domain object to initialize.
2109 * @gov: PM domain governor to associate with the domain (may be NULL).
2110 * @is_off: Initial power state of the domain: true if it starts powered off.
2111 */
2112void pm_genpd_init(struct generic_pm_domain *genpd,
2113		   struct dev_power_governor *gov, bool is_off)
2114{
2115	if (IS_ERR_OR_NULL(genpd))
2116		return;
2117
2118	INIT_LIST_HEAD(&genpd->master_links);
2119	INIT_LIST_HEAD(&genpd->slave_links);
2120	INIT_LIST_HEAD(&genpd->dev_list);
2121	mutex_init(&genpd->lock);
2122	genpd->gov = gov;
2123	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2124	genpd->in_progress = 0;
2125	atomic_set(&genpd->sd_count, 0);
2126	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
2127	init_waitqueue_head(&genpd->status_wait_queue);
2128	genpd->poweroff_task = NULL;
2129	genpd->resume_count = 0;
2130	genpd->device_count = 0;
2131	genpd->max_off_time_ns = -1;
2132	genpd->max_off_time_changed = true;
2133	genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
2134	genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
2135	genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
2136	genpd->domain.ops.prepare = pm_genpd_prepare;
2137	genpd->domain.ops.suspend = pm_genpd_suspend;
2138	genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
2139	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
2140	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
2141	genpd->domain.ops.resume_early = pm_genpd_resume_early;
2142	genpd->domain.ops.resume = pm_genpd_resume;
2143	genpd->domain.ops.freeze = pm_genpd_freeze;
2144	genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
2145	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
2146	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
2147	genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
2148	genpd->domain.ops.thaw = pm_genpd_thaw;
2149	genpd->domain.ops.poweroff = pm_genpd_suspend;
2150	genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
2151	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
2152	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
2153	genpd->domain.ops.restore_early = pm_genpd_resume_early;
2154	genpd->domain.ops.restore = pm_genpd_resume;
2155	genpd->domain.ops.complete = pm_genpd_complete;
2156	genpd->dev_ops.save_state = pm_genpd_default_save_state;
2157	genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
2158	genpd->dev_ops.suspend = pm_genpd_default_suspend;
2159	genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
2160	genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
2161	genpd->dev_ops.resume = pm_genpd_default_resume;
2162	genpd->dev_ops.freeze = pm_genpd_default_freeze;
2163	genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
2164	genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
2165	genpd->dev_ops.thaw = pm_genpd_default_thaw;
2166	mutex_lock(&gpd_list_lock);
2167	list_add(&genpd->gpd_list_node, &gpd_list);
2168	mutex_unlock(&gpd_list_lock);
2169}
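/*
 * Example (illustrative sketch): bringing up a domain that starts powered
 * off.  my_pd, my_pd_power_up(), my_pd_power_down() and my_pmc_set() are
 * hypothetical platform code; pm_genpd_add_device() is the usual way to
 * put a device under the domain's control afterwards.
 *
 *	static int my_pd_power_up(struct generic_pm_domain *domain)
 *	{
 *		return my_pmc_set(domain, true);
 *	}
 *
 *	static int my_pd_power_down(struct generic_pm_domain *domain)
 *	{
 *		return my_pmc_set(domain, false);
 *	}
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my-pd",
 *		.power_on = my_pd_power_up,
 *		.power_off = my_pd_power_down,
 *	};
 *
 *	pm_genpd_init(&my_pd, NULL, true);
 *	ret = pm_genpd_add_device(&my_pd, dev);
 */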
2170