main.c revision 875ab0b74e85d6801a49392447d26e0b28688d86
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device semaphore held,
 * we must never try to acquire a device semaphore while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

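/*
 * initcall_debug_start() and initcall_debug_report() time individual device
 * callbacks and log their duration when the "initcall_debug" boot option is
 * set.
 */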
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s_i+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

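/*
 * pm_verb - Return a descriptive string for the given PM event code, used in
 * the diagnostic messages below.
 */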
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

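/* Log a debug message describing the transition being applied to @dev. */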
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

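/* Report a failed PM callback, including the verb for the current transition. */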
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static int device_resume(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	down(&dev->sem);

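	/*
	 * Resume callbacks run bus first, then type, then class: the reverse
	 * of the ordering used by device_suspend().
	 */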
	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	up(&dev->sem);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
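	/*
	 * Walk dpm_list through a temporary list, taking a reference on each
	 * device and dropping dpm_list_mtx around the callbacks, so devices
	 * added or removed while the callbacks run don't break the iteration.
	 */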
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		if (dev->power.status >= DPM_OFF) {
			int error;

			dev->power.status = DPM_RESUMING;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	down(&dev->sem);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	up(&dev->sem);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
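			/* Drop the runtime PM reference taken in dpm_prepare(). */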
			pm_runtime_put_noidle(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
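	/* Walk the list in reverse so children are handled before their parents. */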
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static int device_suspend(struct device *dev, pm_message_t state)
{
	int error = 0;

	down(&dev->sem);

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}
 End:
	up(&dev->sem);

	return error;
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
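	/*
	 * As in dpm_resume(), iterate via a temporary list and drop
	 * dpm_list_mtx around the device callbacks.
	 */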
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_OFF;
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	down(&dev->sem);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	up(&dev->sem);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

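		/*
		 * Take a runtime PM reference so the device won't be runtime
		 * suspended while the system transition is in progress.
		 */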
		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_noidle(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);