/* main.c revision 8a43a9ab7b329aa8590f8a064df9bf8c80987507 */
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in dpm_list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

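/*
 * First error encountered while suspending devices; a nonzero value makes
 * the remaining (asynchronous) suspends bail out early.
 */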
static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.status = DPM_ON;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent) {
		if (dev->parent->power.status >= DPM_SUSPENDING)
			dev_warn(dev, "parent %s should not be sleeping\n",
				 dev_name(dev->parent));
	} else if (transition_started) {
		/*
		 * We refuse to register parentless devices while a PM
		 * transition is in progress in order to avoid leaving them
		 * unhandled down the road.
		 */
		dev_WARN(dev, "Parentless device registered during a PM transition\n");
	}

	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus",
		 kobject_name(&deva->kobj),
		 devb->bus ? devb->bus->name : "No Bus",
		 kobject_name(&devb->kobj));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus",
		 kobject_name(&dev->kobj));
	list_move_tail(&dev->power.entry, &dpm_list);
}

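/*
 * When the initcall_debug boot parameter is set, log entry to and return
 * from each device PM callback together with its duration (ns >> 10, i.e.
 * approximate microseconds), in the style of the initcall timing messages.
 */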
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i\n",
				dev_name(dev), task_pid_nr(current));
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

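/* Wait for the asynchronously handled children of @dev to complete. */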
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
		 const struct dev_pm_ops *ops,
		 pm_message_t state)
{
	int error = 0;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend) {
			error = ops->suspend(dev);
			suspend_report_result(ops->suspend, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume) {
			error = ops->resume(dev);
			suspend_report_result(ops->resume, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze) {
			error = ops->freeze(dev);
			suspend_report_result(ops->freeze, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff) {
			error = ops->poweroff(dev);
			suspend_report_result(ops->poweroff, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw) {
			error = ops->thaw(dev);
			suspend_report_result(ops->thaw, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore) {
			error = ops->restore(dev);
			suspend_report_result(ops->restore, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
			const struct dev_pm_ops *ops,
			pm_message_t state)
{
	int error = 0;
	ktime_t calltime = ktime_set(0, 0), delta, rettime;

	if (initcall_debug) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
				dev_name(dev), task_pid_nr(current),
				dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		if (ops->suspend_noirq) {
			error = ops->suspend_noirq(dev);
			suspend_report_result(ops->suspend_noirq, error);
		}
		break;
	case PM_EVENT_RESUME:
		if (ops->resume_noirq) {
			error = ops->resume_noirq(dev);
			suspend_report_result(ops->resume_noirq, error);
		}
		break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		if (ops->freeze_noirq) {
			error = ops->freeze_noirq(dev);
			suspend_report_result(ops->freeze_noirq, error);
		}
		break;
	case PM_EVENT_HIBERNATE:
		if (ops->poweroff_noirq) {
			error = ops->poweroff_noirq(dev);
			suspend_report_result(ops->poweroff_noirq, error);
		}
		break;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		if (ops->thaw_noirq) {
			error = ops->thaw_noirq(dev);
			suspend_report_result(ops->thaw_noirq, error);
		}
		break;
	case PM_EVENT_RESTORE:
		if (ops->restore_noirq) {
			error = ops->restore_noirq(dev);
			suspend_report_result(ops->restore_noirq, error);
		}
		break;
#endif /* CONFIG_HIBERNATION */
	default:
		error = -EINVAL;
	}

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("initcall %s_i+ returned %d after %Ld usecs\n",
			dev_name(dev), error,
			(unsigned long long)ktime_to_ns(delta) >> 10);
	}

	return error;
}

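/* Map a PM event code to a human-readable verb for log messages. */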
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

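/* Debug and error reporting helpers shared by the PM callback wrappers. */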
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

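/* Report how long the given phase of a PM transition took to complete. */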
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "EARLY ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "EARLY type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "EARLY class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
	}

End:
	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);

		get_device(dev);
		if (dev->power.status > DPM_OFF) {
			int error;

			dev->power.status = DPM_OFF;
			mutex_unlock(&dpm_list_mtx);

			error = device_resume_noirq(dev, state);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, " early", error);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
	resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * legacy_resume - Execute a legacy (bus or class) resume callback for device.
 * @dev: Device to resume.
 * @cb: Resume callback to execute.
 */
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	dev->power.status = DPM_RESUMING;

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->resume) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_resume(dev, dev->bus->resume);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->resume) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_resume(dev, dev->class->resume);
		}
	}
 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);
	return error;
}

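/*
 * Async callback: resume one device, then drop the reference taken when the
 * resume was scheduled in dpm_resume().
 */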
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

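/*
 * A device is handled asynchronously only if its async_suspend flag is set,
 * async PM is globally enabled, and PM tracing is off (tracing needs devices
 * to be processed in a deterministic order).
 */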
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
static void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

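	/*
	 * First schedule the resume of all devices marked for asynchronous
	 * handling; the synchronous pass below waits for them as necessary
	 * while walking the rest of the list in order.
	 */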
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		if (dev->power.status < DPM_OFF)
			continue;

		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (dev->power.status >= DPM_OFF && !is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);

			mutex_lock(&dpm_list_mtx);
			if (error)
				pm_dev_err(dev, state, "", error);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	device_lock(dev);

	if (dev->class && dev->class->pm && dev->class->pm->complete) {
		pm_dev_dbg(dev, state, "completing class ");
		dev->class->pm->complete(dev);
	}

	if (dev->type && dev->type->pm && dev->type->pm->complete) {
		pm_dev_dbg(dev, state, "completing type ");
		dev->type->pm->complete(dev);
	}

	if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
		pm_dev_dbg(dev, state, "completing ");
		dev->bus->pm->complete(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
static void dpm_complete(pm_message_t state)
{
	struct list_head list;

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	transition_started = false;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		if (dev->power.status > DPM_ON) {
			dev->power.status = DPM_ON;
			mutex_unlock(&dpm_list_mtx);

			device_complete(dev, state);
			pm_runtime_put_sync(dev);

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &list);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	might_sleep();
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	int error = 0;

	if (dev->class && dev->class->pm) {
		pm_dev_dbg(dev, state, "LATE class ");
		error = pm_noirq_op(dev, dev->class->pm, state);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm) {
		pm_dev_dbg(dev, state, "LATE type ");
		error = pm_noirq_op(dev, dev->type->pm, state);
		if (error)
			goto End;
	}

	if (dev->bus && dev->bus->pm) {
		pm_dev_dbg(dev, state, "LATE ");
		error = pm_noirq_op(dev, dev->bus->pm, state);
	}

End:
	return error;
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_OFF_IRQ;
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	int error = 0;

	dpm_wait_for_children(dev, async);
	device_lock(dev);

	if (async_error)
		goto End;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto End;
	}

	if (dev->class) {
		if (dev->class->pm) {
			pm_dev_dbg(dev, state, "class ");
			error = pm_op(dev, dev->class->pm, state);
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
		}
		if (error)
			goto End;
	}

	if (dev->type) {
		if (dev->type->pm) {
			pm_dev_dbg(dev, state, "type ");
			error = pm_op(dev, dev->type->pm, state);
		}
		if (error)
			goto End;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			pm_dev_dbg(dev, state, "");
			error = pm_op(dev, dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
		}
	}

	if (!error)
		dev->power.status = DPM_OFF;

 End:
	device_unlock(dev);
	complete_all(&dev->power.completion);

	if (error)
		async_error = error;

	return error;
}

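/*
 * Async callback: suspend one device, then drop the reference taken when the
 * suspend was scheduled in device_suspend().
 */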
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

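/*
 * Suspend one device, either by scheduling it for asynchronous handling or by
 * calling __device_suspend() directly; errors on the async path are reported
 * via async_error.
 */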
static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (!error)
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int error = 0;

	device_lock(dev);

	if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing ");
		error = dev->bus->pm->prepare(dev);
		suspend_report_result(dev->bus->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->type && dev->type->pm && dev->type->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing type ");
		error = dev->type->pm->prepare(dev);
		suspend_report_result(dev->type->pm->prepare, error);
		if (error)
			goto End;
	}

	if (dev->class && dev->class->pm && dev->class->pm->prepare) {
		pm_dev_dbg(dev, state, "preparing class ");
		error = dev->class->pm->prepare(dev);
		suspend_report_result(dev->class->pm->prepare, error);
	}
 End:
	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	transition_started = true;
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		dev->power.status = DPM_PREPARING;
		mutex_unlock(&dpm_list_mtx);

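		/*
		 * Pin the device and flush any pending runtime PM requests so
		 * that they cannot run concurrently with the system sleep
		 * transition.  A runtime resume that was pending for a
		 * wakeup-capable device is recorded as a wakeup event, which
		 * makes pm_wakeup_pending() abort the transition below.
		 */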
		pm_runtime_get_noresume(dev);
		if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
			pm_wakeup_event(dev, 0);

		if (pm_wakeup_pending()) {
			pm_runtime_put_sync(dev);
			error = -EBUSY;
		} else {
			error = device_prepare(dev, state);
		}

		mutex_lock(&dpm_list_mtx);
		if (error) {
			dev->power.status = DPM_ON;
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				kobject_name(&dev->kobj), error);
			put_device(dev);
			break;
		}
		dev->power.status = DPM_SUSPENDING;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	might_sleep();
	error = dpm_prepare(state);
	if (!error)
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1117