/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
#include <linux/wakeup_reason.h>

#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling  %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		/* nsecs >> 10 is a cheap approximation of nsecs / 1000 */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
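/*
 * Illustrative sketch (an addition, not part of the original file): a
 * hypothetical driver "foo" would normally supply the callbacks selected
 * above through a dev_pm_ops object, e.g. with the SIMPLE_DEV_PM_OPS()
 * helper from <linux/pm.h>:
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * SIMPLE_DEV_PM_OPS() maps the two functions onto ->suspend/->resume and
 * the corresponding hibernation callbacks (freeze/thaw/poweroff/restore),
 * so pm_op() can pick the right one for the current transition.
 */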
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}
static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd
/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}
/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}
/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
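/*
 * Note (summarized from Kconfig, not part of the original file): the watchdog
 * above is compiled in only when CONFIG_DPM_WATCHDOG is enabled, which (at
 * least in kernels of this vintage) depends on PM debugging support and
 * PSTORE, so the panic above can be captured as a crash dump;
 * CONFIG_DPM_WATCHDOG_TIMEOUT sets the per-device timeout in seconds.
 */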
/*------------------------- Resume routines -------------------------*/
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
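/*
 * Illustrative note (an addition, not part of the original file): a driver
 * opts a device into the async path checked above by calling
 * device_enable_async_suspend() from <linux/device.h>, typically at probe
 * time:
 *
 *	device_enable_async_suspend(&pdev->dev);
 *
 * User space can flip the flag per device through the "power/async" sysfs
 * attribute and disable async suspend globally via /sys/power/pm_async.
 */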
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait(dev->parent, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait(dev->parent, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
		dpm_resume_noirq(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "noirq");
	}
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}
static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}
static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	} else {
		dpm_show_time(starttime, state, "late");
	}
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_children(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_get_active_wakeup_sources(suspend_abort,
			MAX_SUSPEND_ABORT_LEN);
		log_suspend_abort_reason(suspend_abort);
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	TRACE_SUSPEND(error);
	return error;
}
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}
static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
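/*
 * Illustrative sketch (an addition, not part of the original file): a
 * hypothetical driver that opts into the direct_complete optimization
 * handled above would report from its ->prepare() callback that a
 * runtime-suspended device may stay suspended:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev) ? 1 : 0;
 *	}
 *
 * If all of the device's descendants do the same, the PM core skips its
 * remaining suspend and resume callbacks for the whole transition.
 */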
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes, before
	 * disabling probing of devices. This sync point is important at least
	 * at boot time and at hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing devices during suspend or hibernation is unsafe and makes
	 * system behavior unpredictable, so prohibit device probing here and
	 * defer the probes instead. The normal behavior will be restored in
	 * dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
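/*
 * For orientation (summary comment, not part of the original file): a full
 * system suspend walks the phases below in order, and each resume phase
 * undoes the matching suspend phase on the way back:
 *
 *	dpm_prepare()		->prepare()
 *	dpm_suspend()		->suspend()
 *	dpm_suspend_late()	->suspend_late()
 *	dpm_suspend_noirq()	->suspend_noirq()
 *		(system sleeps)
 *	dpm_resume_noirq()	->resume_noirq()
 *	dpm_resume_early()	->resume_early()
 *	dpm_resume()		->resume()
 *	dpm_complete()		->complete()
 */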
void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
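/*
 * Illustrative sketch (an addition; "supplier_dev" is a hypothetical field):
 * a driver whose device depends on another device outside its ancestry can
 * use device_pm_wait_for_dev() from its own callback to order the two:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return device_pm_wait_for_dev(dev, foo->supplier_dev);
 *	}
 */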
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}
void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || (pm_ops_is_empty(dev->class->pm) &&
		 !dev->class->suspend && !dev->class->resume)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}