/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
        dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                /* ">> 10" divides by 1024: a cheap approximation of ns -> usecs */
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}
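
/*
 * Note: these waits implement the parent/child ordering that makes
 * asynchronous suspend/resume safe.  A driver opts a device in with
 * device_enable_async_suspend(dev) (see <linux/pm.h>); device_resume()
 * below then waits for the device's parent to finish resuming, and
 * __device_suspend() waits for all of the device's children to finish
 * suspending via dpm_wait_for_children().
 */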

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The callback returned by this function is executed while device interrupts
 * are disabled.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

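/*
 * Example (illustrative; the foo_* names are placeholders): a driver that
 * wants the callbacks selected above supplies a dev_pm_ops object, e.g.
 * using the helper macro from <linux/pm.h>:
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *      };
 *
 * SET_SYSTEM_SLEEP_PM_OPS() points the freeze/thaw/poweroff/restore
 * hibernation callbacks at the same suspend/resume pair, matching the
 * cases handled by pm_op() above.
 */
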
static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->pm_domain) {
                info = "EARLY power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "EARLY type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "EARLY class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "EARLY bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "EARLY driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        bool put = false;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        pm_runtime_enable(dev);
        put = true;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        if (put)
                pm_runtime_put_sync(dev);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->pm_domain) {
                info = "LATE power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "LATE type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "LATE class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "LATE bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "LATE driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                return 0;

        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                return 0;
        }

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);
        complete_all(&dev->power.completion);

        if (error) {
                pm_runtime_put_sync(dev);
                async_error = error;
        } else if (dev->power.is_suspended) {
                __pm_runtime_disable(dev, false);
        }

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}
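
/*
 * Note: as handled above, -EAGAIN from a ->prepare() callback is not fatal.
 * The error is discarded and the loop continues rather than unwinding the
 * whole transition, so a subsystem can use -EAGAIN to report a transient
 * condition (e.g. the device is going away) without aborting the suspend.
 */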

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
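
/*
 * For reference, the system sleep core (see kernel/power/) drives these
 * entry points in roughly this order during a suspend/resume cycle:
 *
 *      dpm_suspend_start(PMSG_SUSPEND);   // dpm_prepare() + dpm_suspend()
 *      dpm_suspend_noirq(PMSG_SUSPEND);
 *      // ... the system enters the sleep state and later wakes up ...
 *      dpm_resume_noirq(PMSG_RESUME);
 *      dpm_resume_end(PMSG_RESUME);       // dpm_resume() + dpm_complete()
 */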

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
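
/*
 * Example (illustrative; the foo_* and "other" names are placeholders):
 * a device that must not finish suspending before some unrelated device
 * has finished can synchronize with it from its (possibly asynchronous)
 * suspend callback:
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              int error = device_pm_wait_for_dev(dev, other);
 *
 *              if (error)
 *                      return error;
 *              return foo_do_suspend(dev);
 *      }
 */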