/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after parents, and are
 * inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device semaphore held,
 * we must never try to acquire a device semaphore while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);
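/* PM message of the transition in progress, set under dpm_list_mtx. */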
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road.
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

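/**
 * initcall_debug_start - Note the start of a device PM callback.
 * @dev: Device the callback is about to be run for.
 *
 * If the initcall_debug boot parameter is set, log the device name and the
 * current task's PID and return the current time, so that the duration of
 * the callback can be computed by initcall_debug_report().
 */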
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

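/**
 * initcall_debug_report - Log the result and duration of a device PM callback.
 * @dev: Device the callback has just been run for.
 * @calltime: Time stamp returned by initcall_debug_start().
 * @error: Error code returned by the callback.
 *
 * Does nothing unless the initcall_debug boot parameter is set.
 */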
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || dev->power.async_suspend)
		wait_for_completion(&dev->power.completion);
}

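/* device_for_each_child() callback: apply dpm_wait() to one child. */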
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

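/**
 * dpm_wait_for_children - Wait for the pending PM operations of all children.
 * @dev: Device whose children to wait for.
 * @async: Passed to dpm_wait() for each child.
 */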
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime, delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

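/* Return the name of the given PM event for use in messages. */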
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

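/*
 * Emit a debug message identifying the device, the PM operation run for it
 * and, for sleep transitions, whether it may wake up the system.
 */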
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

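/* Report a device callback failure during the given PM transition. */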
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

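/**
 * dpm_show_time - Report how long a phase of a PM transition has taken.
 * @starttime: Time stamp taken when the phase started.
 * @state: PM transition of the system being carried out.
 * @info: Name of the phase ("early", "late") or NULL for the main phase.
 */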
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	s64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	list_for_each_entry(dev, &dpm_list, power.entry)
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			error = device_resume_noirq(dev, state);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int __device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	down(&dev->sem);

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	up(&dev->sem);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

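/*
 * async_schedule() callback: resume a device asynchronously and drop the
 * reference on it taken by device_resume().
 */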
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

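/**
 * device_resume - Start resuming a device, asynchronously if allowed.
 * @dev: Device to resume.
 *
 * If @dev is marked for asynchronous suspend/resume and PM tracing is not
 * enabled, schedule its resume via async_schedule() and return immediately;
 * otherwise, resume it synchronously.
 */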
static int device_resume(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (dev->power.async_suspend && !pm_trace_is_enabled()) {
		get_device(dev);
		async_schedule(async_resume, dev);
		return 0;
	}

	return __device_resume(dev, pm_transition, false);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		if (dev->power.status >= DPM_OFF) {
			int error;

			dev->power.status = DPM_RESUMING;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		} else if (dev->power.status == DPM_SUSPENDING) {
			/* Allow new children of the device to be registered */
			dev->power.status = DPM_RESUMING;
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	down(&dev->sem);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	up(&dev->sem);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
		error = device_suspend_noirq(dev, state);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

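/* First error returned by a device suspended asynchronously, if any. */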
static int async_error;

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	down(&dev->sem);

	if (async_error)
		goto End;

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

 End:
	up(&dev->sem);
	complete_all(&dev->power.completion);

	return error;
}

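/*
 * async_schedule() callback: suspend a device asynchronously, record the
 * first failure in async_error and drop the reference on the device taken
 * by device_suspend().
 */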
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		pm_dev_err(dev, pm_transition, " async", error);
		async_error = error;
	}

	put_device(dev);
}

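/**
 * device_suspend - Start suspending a device, asynchronously if allowed.
 * @dev: Device to suspend.
 */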
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	struct list_head list;
	ktime_t starttime = ktime_get();
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
		if (async_error)
			break;
	}
	list_splice(&list, dpm_list.prev);
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	down(&dev->sem);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	up(&dev->sem);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	struct list_head list;
	int error = 0;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
			/* Wake-up requested during system sleep transition. */
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_ERR "PM: Failed to prepare device %s "
				"for power transition: error %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
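
/*
 * Usage sketch (an illustration, not part of this file): the system sleep
 * code in kernel/power/suspend.c pairs the helpers exported above roughly
 * as follows, with the platform-specific step that actually enters the
 * sleep state elided:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = dpm_suspend_noirq(PMSG_SUSPEND);
 *		if (!error) {
 *			... enter the sleep state ...
 *			dpm_resume_noirq(PMSG_RESUME);
 *		}
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 *
 * Note that dpm_suspend_noirq() runs the "noirq" resume itself on failure,
 * and the caller is expected to call dpm_resume_end() to recover even when
 * dpm_suspend_start() fails, since devices already prepared or suspended
 * still need their resume and ->complete() callbacks.
 */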

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
1043