/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

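/*
 * All system sleep callbacks in struct dev_pm_ops share this signature.
 * A driver typically supplies them through a dev_pm_ops instance; as a
 * minimal sketch (foo_suspend/foo_resume are hypothetical):
 *
 *	static int foo_suspend(struct device *dev) { ... }
 *	static int foo_resume(struct device *dev) { ... }
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */
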
/*
 * The entries in dpm_list are in depth-first order, simply because
 * children are guaranteed to be discovered after their parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

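/*
 * A device moves along these lists as a sleep transition progresses:
 * dpm_list (all registered devices) -> dpm_prepared_list ->
 * dpm_suspended_list -> dpm_late_early_list -> dpm_noirq_list on suspend,
 * and back in the opposite direction on resume.
 */
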
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static void dpm_drv_timeout(unsigned long data);
struct dpm_drv_wd_data {
        struct device *dev;
        struct task_struct *tsk;
};

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        spin_lock_init(&dev->power.lock);
        pm_runtime_init(dev);
        INIT_LIST_HEAD(&dev->power.entry);
        dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        dev_pm_qos_constraints_init(dev);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        dev_pm_qos_constraints_destroy(dev);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}
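
/*
 * Ordering note: resume waits on a device's parent (dpm_wait() in
 * device_resume()), while suspend waits on its children (the
 * dpm_wait_for_children() call in __device_suspend()), so asynchronously
 * scheduled callbacks still observe parent/child dependencies.
 */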

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
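
/*
 * For example, pm_op(ops, PMSG_SUSPEND) yields ops->suspend and
 * pm_op(ops, PMSG_THAW) yields ops->thaw.  A NULL result means the
 * subsystem provides no callback for this event, which
 * dpm_run_callback() treats as success.
 */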

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is being
 * executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned callback
 * is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
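
/*
 * All of the phases below look up a callback in the same order of
 * precedence: PM domain first, then device type, then class, then bus,
 * falling back to the driver's own dev_pm_ops only when none of those
 * provides one.
 */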

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
}
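
/*
 * The get_device()/list_move_tail() pattern above (repeated in the other
 * dpm_* loops) allows dpm_list_mtx to be dropped around each callback:
 * the reference keeps the device alive, and moving it onto the next
 * phase's list first keeps the walk safe if devices are unregistered in
 * the meantime.
 */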

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        bool put = false;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        pm_runtime_enable(dev);
        put = true;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        if (put)
                pm_runtime_put_sync(dev);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_drv_timeout - Driver suspend / resume watchdog handler.
 * @data: Pointer to the struct dpm_drv_wd_data for the device that timed out.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so BUG() out for a
 * crash dump.
 */
static void dpm_drv_timeout(unsigned long data)
{
        struct dpm_drv_wd_data *wd_data = (void *)data;
        struct device *dev = wd_data->dev;
        struct task_struct *tsk = wd_data->tsk;

        printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
               (dev->driver ? dev->driver->name : "no driver"));

        printk(KERN_EMERG "dpm suspend stack:\n");
        show_stack(tsk, NULL);

        BUG();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);

        return error ? : dpm_suspend_noirq(state);
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
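
/*
 * Note that the GNU "?:" in dpm_suspend_end() propagates a nonzero
 * late-suspend error unchanged and only enters the noirq phase when the
 * late phase succeeded.
 */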

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
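
/*
 * Legacy bus and class callbacks take the pm_message_t directly, e.g.
 * (hypothetical driver):
 *
 *	static int foo_bus_suspend(struct device *dev, pm_message_t state);
 *
 * whereas dev_pm_ops callbacks (pm_callback_t) receive only the device.
 */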

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        struct timer_list timer;
        struct dpm_drv_wd_data data;

        dpm_wait_for_children(dev, async);

        if (async_error)
                return 0;

        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put_sync(dev);
                async_error = -EBUSY;
                return 0;
        }

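        /*
         * Arm a watchdog: if this device's suspend callback has not
         * returned within 12 seconds (HZ * 12), dpm_drv_timeout() fires
         * and deliberately crashes the system to capture the hang.
         */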
        data.dev = dev;
        data.tsk = get_current();
        init_timer_on_stack(&timer);
        timer.expires = jiffies + HZ * 12;
        timer.function = dpm_drv_timeout;
        timer.data = (unsigned long)&data;
        add_timer(&timer);

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

        del_timer_sync(&timer);
        destroy_timer_on_stack(&timer);

        complete_all(&dev->power.completion);

        if (error) {
                pm_runtime_put_sync(dev);
                async_error = error;
        } else if (dev->power.is_suspended) {
                __pm_runtime_disable(dev, false);
        }

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}
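
/*
 * An asynchronously scheduled suspend reports failure through async_error
 * rather than through device_suspend()'s return value; dpm_suspend()
 * collects it after async_synchronize_full().
 */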

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
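
/*
 * A minimal sketch of how the sleep core is expected to drive these entry
 * points for an ordinary suspend (error handling and the actual
 * sleep-state entry are omitted):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	...
 *	error = dpm_suspend_end(PMSG_SUSPEND);
 *	...	the system sleeps here	...
 *	dpm_resume_start(PMSG_RESUME);
 *	...
 *	dpm_resume_end(PMSG_RESUME);
 */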

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);