PM / Runtime: Add no_callbacks flag
/*
 * drivers/base/power/runtime.c - Helper functions for device run-time PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/jiffies.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
        unsigned long now = jiffies;
        int delta;

        delta = now - dev->power.accounting_timestamp;

        if (delta < 0)
                delta = 0;

        dev->power.accounting_timestamp = now;

        if (dev->power.disable_depth > 0)
                return;

        if (dev->power.runtime_status == RPM_SUSPENDED)
                dev->power.suspended_jiffies += delta;
        else
                dev->power.active_jiffies += delta;
}
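
/*
 * Illustrative sketch (not part of this file): the jiffies counters kept
 * above are what the run-time PM sysfs attributes report, after a
 * conversion along these lines (cf. drivers/base/power/sysfs.c; the exact
 * code there may differ):
 *
 *        spin_lock_irq(&dev->power.lock);
 *        update_pm_runtime_accounting(dev);
 *        active_ms = jiffies_to_msecs(dev->power.active_jiffies);
 *        spin_unlock_irq(&dev->power.lock);
 */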

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
        update_pm_runtime_accounting(dev);
        dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
        if (dev->power.timer_expires > 0) {
                del_timer(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);
        /*
         * In case there's a request pending, make sure its work function will
         * return without doing anything.
         */
        dev->power.request = RPM_REQ_NONE;
}

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
        int retval = 0;

        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (atomic_read(&dev->power.usage_count) > 0
            || dev->power.disable_depth > 0)
                retval = -EAGAIN;
        else if (!pm_children_suspended(dev))
                retval = -EBUSY;

        /* Pending resume requests take precedence over suspends. */
        else if ((dev->power.deferred_resume
                        && dev->power.runtime_status == RPM_SUSPENDING)
            || (dev->power.request_pending
                        && dev->power.request == RPM_REQ_RESUME))
                retval = -EAGAIN;
        else if (dev->power.runtime_status == RPM_SUSPENDED)
                retval = 1;

        return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        int retval;

        retval = rpm_check_suspend_allowed(dev);
        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Idle notifications are allowed only in the RPM_ACTIVE state. */
        else if (dev->power.runtime_status != RPM_ACTIVE)
                retval = -EAGAIN;

        /*
         * Any pending request other than an idle notification takes
         * precedence over us, except that the timer may be running.
         */
        else if (dev->power.request_pending &&
            dev->power.request > RPM_REQ_IDLE)
                retval = -EAGAIN;

        /* Act as though RPM_NOWAIT is always set. */
        else if (dev->power.idle_notification)
                retval = -EINPROGRESS;
        if (retval)
                goto out;

        /* Pending requests need to be canceled. */
        dev->power.request = RPM_REQ_NONE;

        if (dev->power.no_callbacks) {
                /* Assume ->runtime_idle() callback would have suspended. */
                retval = rpm_suspend(dev, rpmflags);
                goto out;
        }

        /* Carry out an asynchronous or a synchronous idle notification. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_IDLE;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        dev->power.idle_notification = true;

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->bus->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->type->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_idle) {
                spin_unlock_irq(&dev->power.lock);

                dev->class->pm->runtime_idle(dev);

                spin_lock_irq(&dev->power.lock);
        }

        dev->power.idle_notification = false;
        wake_up_all(&dev->power.wait_queue);

 out:
        return retval;
}
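
/*
 * Illustrative sketch (not part of this file): a typical ->runtime_idle()
 * callback, as invoked above, just checks that the device is quiescent
 * and asks for a suspend.  The foo_* names are hypothetical:
 *
 *        static int foo_runtime_idle(struct device *dev)
 *        {
 *                if (foo_device_quiescent(dev))
 *                        pm_runtime_suspend(dev);
 *                return 0;
 *        }
 */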

/**
 * rpm_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be suspended.  If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags.  Cancel a
 * pending idle notification.  If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        bool notify = false;
        int retval;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        retval = rpm_check_suspend_allowed(dev);

        if (retval < 0)
                ;       /* Conditions are wrong. */

        /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
        else if (dev->power.runtime_status == RPM_RESUMING &&
            !(rpmflags & RPM_ASYNC))
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the other suspend running in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        dev->power.deferred_resume = false;
        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        /* Carry out an asynchronous or a synchronous suspend. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_SUSPEND;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                goto out;
        }

        __update_runtime_status(dev, RPM_SUSPENDING);

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->type && dev->type->pm
            && dev->type->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->type->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_suspend) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->class->pm->runtime_suspend(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                __update_runtime_status(dev, RPM_ACTIVE);
                dev->power.deferred_resume = false;
                if (retval == -EAGAIN || retval == -EBUSY) {
                        if (dev->power.timer_expires == 0)
                                notify = true;
                        dev->power.runtime_error = 0;
                } else {
                        pm_runtime_cancel_pending(dev);
                }
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_deactivate_timer(dev);

                if (dev->parent) {
                        parent = dev->parent;
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                }
        }
        wake_up_all(&dev->power.wait_queue);

        if (dev->power.deferred_resume) {
                rpm_resume(dev, 0);
                retval = -EAGAIN;
                goto out;
        }

        if (notify)
                rpm_idle(dev, 0);

        if (parent && !parent->power.ignore_children) {
                spin_unlock_irq(&dev->power.lock);

                pm_request_idle(parent);

                spin_lock_irq(&dev->power.lock);
        }

 out:
        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}
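
/*
 * Illustrative sketch (not part of this file): a ->runtime_suspend()
 * callback may veto the transition by returning -EBUSY or -EAGAIN, in
 * which case rpm_suspend() restores RPM_ACTIVE without recording an
 * error.  The foo_* names are hypothetical:
 *
 *        static int foo_runtime_suspend(struct device *dev)
 *        {
 *                if (foo_transfer_in_progress(dev))
 *                        return -EBUSY;
 *                foo_save_registers(dev);
 *                foo_power_down(dev);
 *                return 0;
 *        }
 */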

/**
 * rpm_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's run-time PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
        struct device *parent = NULL;
        int retval = 0;

        dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);

 repeat:
        if (dev->power.runtime_error)
                retval = -EINVAL;
        else if (dev->power.disable_depth > 0)
                retval = -EAGAIN;
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        if (dev->power.runtime_status == RPM_ACTIVE) {
                retval = 1;
                goto out;
        }

        if (dev->power.runtime_status == RPM_RESUMING
            || dev->power.runtime_status == RPM_SUSPENDING) {
                DEFINE_WAIT(wait);

                if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
                        if (dev->power.runtime_status == RPM_SUSPENDING)
                                dev->power.deferred_resume = true;
                        else
                                retval = -EINPROGRESS;
                        goto out;
                }

                /* Wait for the operation carried out in parallel with us. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_RESUMING
                            && dev->power.runtime_status != RPM_SUSPENDING)
                                break;

                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
                goto repeat;
        }

        /*
         * See if we can skip waking up the parent.  This is safe only if
         * power.no_callbacks is set, because otherwise we don't know whether
         * the resume will actually succeed.
         */
        if (dev->power.no_callbacks && !parent && dev->parent) {
                spin_lock(&dev->parent->power.lock);
                if (dev->parent->power.disable_depth > 0
                    || dev->parent->power.ignore_children
                    || dev->parent->power.runtime_status == RPM_ACTIVE) {
                        atomic_inc(&dev->parent->power.child_count);
                        spin_unlock(&dev->parent->power.lock);
                        goto no_callback;       /* Assume success. */
                }
                spin_unlock(&dev->parent->power.lock);
        }

        /* Carry out an asynchronous or a synchronous resume. */
        if (rpmflags & RPM_ASYNC) {
                dev->power.request = RPM_REQ_RESUME;
                if (!dev->power.request_pending) {
                        dev->power.request_pending = true;
                        queue_work(pm_wq, &dev->power.work);
                }
                retval = 0;
                goto out;
        }

        if (!parent && dev->parent) {
                /*
                 * Increment the parent's resume counter and resume it if
                 * necessary.
                 */
                parent = dev->parent;
                spin_unlock(&dev->power.lock);

                pm_runtime_get_noresume(parent);

                spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's run-time PM is disabled or it
                 * is set to ignore children.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children) {
                        rpm_resume(parent, 0);
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
                spin_unlock(&parent->power.lock);

                spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
        }

        if (dev->power.no_callbacks)
                goto no_callback;       /* Assume success. */

        __update_runtime_status(dev, RPM_RESUMING);

        if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->bus->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->type && dev->type->pm
            && dev->type->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->type->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else if (dev->class && dev->class->pm
            && dev->class->pm->runtime_resume) {
                spin_unlock_irq(&dev->power.lock);

                retval = dev->class->pm->runtime_resume(dev);

                spin_lock_irq(&dev->power.lock);
                dev->power.runtime_error = retval;
        } else {
                retval = -ENOSYS;
        }

        if (retval) {
                __update_runtime_status(dev, RPM_SUSPENDED);
                pm_runtime_cancel_pending(dev);
        } else {
 no_callback:
                __update_runtime_status(dev, RPM_ACTIVE);
                if (parent)
                        atomic_inc(&parent->power.child_count);
        }
        wake_up_all(&dev->power.wait_queue);

        if (!retval)
                rpm_idle(dev, RPM_ASYNC);

 out:
        if (parent) {
                spin_unlock_irq(&dev->power.lock);

                pm_runtime_put(parent);

                spin_lock_irq(&dev->power.lock);
        }

        dev_dbg(dev, "%s returns %d\n", __func__, retval);

        return retval;
}
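
/*
 * Illustrative sketch (not part of this file): because rpm_resume() walks
 * up and resumes suspended parents first, a driver only needs the usual
 * get/put pairing around I/O.  foo_start_transfer() is hypothetical:
 *
 *        error = pm_runtime_get_sync(dev);
 *        if (error < 0) {
 *                pm_runtime_put(dev);
 *                return error;
 *        }
 *        foo_start_transfer(dev);
 *        ...
 *        pm_runtime_put(dev);
 */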

/**
 * pm_runtime_work - Universal run-time PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine what
 * is to be done and execute the appropriate run-time PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
        struct device *dev = container_of(work, struct device, power.work);
        enum rpm_request req;

        spin_lock_irq(&dev->power.lock);

        if (!dev->power.request_pending)
                goto out;

        req = dev->power.request;
        dev->power.request = RPM_REQ_NONE;
        dev->power.request_pending = false;

        switch (req) {
        case RPM_REQ_NONE:
                break;
        case RPM_REQ_IDLE:
                rpm_idle(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_SUSPEND:
                rpm_suspend(dev, RPM_NOWAIT);
                break;
        case RPM_REQ_RESUME:
                rpm_resume(dev, RPM_NOWAIT);
                break;
        }

 out:
        spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
        struct device *dev = (struct device *)data;
        unsigned long flags;
        unsigned long expires;

        spin_lock_irqsave(&dev->power.lock, flags);

        expires = dev->power.timer_expires;
        /* If 'expires' is after 'jiffies' we've been called too early. */
        if (expires > 0 && !time_after(expires, jiffies)) {
                dev->power.timer_expires = 0;
                rpm_suspend(dev, RPM_ASYNC);
        }

        spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!delay) {
                retval = rpm_suspend(dev, RPM_ASYNC);
                goto out;
        }

        retval = rpm_check_suspend_allowed(dev);
        if (retval)
                goto out;

        /* Other scheduled or pending requests need to be canceled. */
        pm_runtime_cancel_pending(dev);

        dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
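        /*
         * timer_expires == 0 means "no timer pending", so if the sum above
         * happened to wrap to 0, nudge it to 1 (one jiffy early is harmless).
         */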
        dev->power.timer_expires += !dev->power.timer_expires;
        mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
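
/*
 * Illustrative sketch (not part of this file): a driver that has just
 * finished a burst of I/O can drop its usage count without triggering an
 * immediate suspend and instead ask for one a little later (the 500 ms
 * value is arbitrary):
 *
 *        pm_runtime_put_noidle(dev);
 *        pm_schedule_suspend(dev, 500);
 */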

/**
 * __pm_runtime_idle - Entry point for run-time idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Carry out a suspend, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for run-time resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
        unsigned long flags;
        int retval;

        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);

        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_resume(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);

        return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
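
/*
 * For orientation (not part of this file): the helpers most drivers use
 * are inline wrappers around the three entry points above, roughly as
 * defined in include/linux/pm_runtime.h at this point in the tree:
 *
 *        pm_runtime_idle(dev)          __pm_runtime_idle(dev, 0)
 *        pm_request_idle(dev)          __pm_runtime_idle(dev, RPM_ASYNC)
 *        pm_runtime_put(dev)           __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 *        pm_runtime_put_sync(dev)      __pm_runtime_idle(dev, RPM_GET_PUT)
 *        pm_runtime_suspend(dev)       __pm_runtime_suspend(dev, 0)
 *        pm_runtime_resume(dev)        __pm_runtime_resume(dev, 0)
 *        pm_request_resume(dev)        __pm_runtime_resume(dev, RPM_ASYNC)
 *        pm_runtime_get(dev)           __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC)
 *        pm_runtime_get_sync(dev)      __pm_runtime_resume(dev, RPM_GET_PUT)
 */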

/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status of the device.
 *
 * If run-time PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified to
 * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
        struct device *parent = dev->parent;
        unsigned long flags;
        bool notify_parent = false;
        int error = 0;

        if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
                return -EINVAL;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (!dev->power.runtime_error && !dev->power.disable_depth) {
                error = -EAGAIN;
                goto out;
        }

        if (dev->power.runtime_status == status)
                goto out_set;

        if (status == RPM_SUSPENDED) {
                /* It always is possible to set the status to 'suspended'. */
                if (parent) {
                        atomic_add_unless(&parent->power.child_count, -1, 0);
                        notify_parent = !parent->power.ignore_children;
                }
                goto out_set;
        }

        if (parent) {
                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

                /*
                 * It is invalid to put an active child under a parent that is
                 * not active, has run-time PM enabled and the
                 * 'power.ignore_children' flag unset.
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
                    && parent->power.runtime_status != RPM_ACTIVE)
                        error = -EBUSY;
                else if (dev->power.runtime_status == RPM_SUSPENDED)
                        atomic_inc(&parent->power.child_count);

                spin_unlock(&parent->power.lock);

                if (error)
                        goto out;
        }

 out_set:
        __update_runtime_status(dev, status);
        dev->power.runtime_error = 0;
 out:
        spin_unlock_irqrestore(&dev->power.lock, flags);

        if (notify_parent)
                pm_request_idle(parent);

        return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
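
/*
 * Illustrative sketch (not part of this file): pm_runtime_set_active(dev)
 * and pm_runtime_set_suspended(dev) are inline wrappers that pass
 * RPM_ACTIVE and RPM_SUSPENDED to the function above.  A driver whose
 * hardware powers up in the working state would typically declare that
 * before enabling run-time PM:
 *
 *        pm_runtime_set_active(dev);
 *        pm_runtime_enable(dev);
 */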

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * run-time PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
        pm_runtime_deactivate_timer(dev);

        if (dev->power.request_pending) {
                dev->power.request = RPM_REQ_NONE;
                spin_unlock_irq(&dev->power.lock);

                cancel_work_sync(&dev->power.work);

                spin_lock_irq(&dev->power.lock);
                dev->power.request_pending = false;
        }

        if (dev->power.runtime_status == RPM_SUSPENDING
            || dev->power.runtime_status == RPM_RESUMING
            || dev->power.idle_notification) {
                DEFINE_WAIT(wait);

                /* Suspend, wake-up or idle notification in progress. */
                for (;;) {
                        prepare_to_wait(&dev->power.wait_queue, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (dev->power.runtime_status != RPM_SUSPENDING
                            && dev->power.runtime_status != RPM_RESUMING
                            && !dev->power.idle_notification)
                                break;
                        spin_unlock_irq(&dev->power.lock);

                        schedule();

                        spin_lock_irq(&dev->power.lock);
                }
                finish_wait(&dev->power.wait_queue, &wait);
        }
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake it up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all run-time PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
        int retval = 0;

        pm_runtime_get_noresume(dev);
        spin_lock_irq(&dev->power.lock);

        if (dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                rpm_resume(dev, 0);
                retval = 1;
        }

        __pm_runtime_barrier(dev);

        spin_unlock_irq(&dev->power.lock);
        pm_runtime_put_noidle(dev);

        return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
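
/*
 * Illustrative sketch (not part of this file): the driver core brackets
 * driver probing with a barrier so no run-time PM activity is still in
 * flight, along these lines (cf. drivers/base/dd.c; details may differ):
 *
 *        pm_runtime_get_noresume(dev);
 *        pm_runtime_barrier(dev);
 *        ret = really_probe(dev, drv);
 *        pm_runtime_put_sync(dev);
 */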

/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending run-time PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its run-time PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
        spin_lock_irq(&dev->power.lock);

        if (dev->power.disable_depth > 0) {
                dev->power.disable_depth++;
                goto out;
        }

        /*
         * Wake up the device if there's a resume request pending, because that
         * means there probably is some I/O to process and disabling run-time
         * PM shouldn't prevent the device from processing the I/O.
         */
        if (check_resume && dev->power.request_pending
            && dev->power.request == RPM_REQ_RESUME) {
                /*
                 * Prevent suspends and idle notifications from being carried
                 * out after we have woken up the device.
                 */
                pm_runtime_get_noresume(dev);

                rpm_resume(dev, 0);

                pm_runtime_put_noidle(dev);
        }

        if (!dev->power.disable_depth++)
                __pm_runtime_barrier(dev);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable run-time PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        if (dev->power.disable_depth > 0)
                dev->power.disable_depth--;
        else
                dev_warn(dev, "Unbalanced %s!\n", __func__);

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
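
/*
 * Illustrative sketch (not part of this file): disable/enable calls nest
 * via power.disable_depth, so a section that must not race with run-time
 * PM can bracket itself like this; pm_runtime_disable(dev) is an inline
 * wrapper for __pm_runtime_disable(dev, true), and foo_reconfigure() is
 * hypothetical:
 *
 *        pm_runtime_disable(dev);
 *        foo_reconfigure(dev);
 *        pm_runtime_enable(dev);
 */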

/**
 * pm_runtime_forbid - Block run-time PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (!dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = false;
        atomic_inc(&dev->power.usage_count);
        rpm_resume(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock run-time PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        if (dev->power.runtime_auto)
                goto out;

        dev->power.runtime_auto = true;
        if (atomic_dec_and_test(&dev->power.usage_count))
                rpm_idle(dev, 0);

 out:
        spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
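
/*
 * For orientation (not part of this file): pm_runtime_forbid() and
 * pm_runtime_allow() back the power/control sysfs attribute ("on" forbids,
 * "auto" allows).  A driver that wants its device kept active until
 * userspace explicitly opts in can do, for example:
 *
 *        pm_runtime_set_active(dev);
 *        pm_runtime_forbid(dev);
 *        pm_runtime_enable(dev);
 */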

/**
 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no run-time PM
 * callbacks of its own.  The run-time sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_callbacks = 1;
        spin_unlock_irq(&dev->power.lock);
        if (device_is_registered(dev))
                rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
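
/*
 * Illustrative sketch (not part of this file): pm_runtime_no_callbacks()
 * is intended for devices that are power-managed entirely through their
 * parent, e.g. a child device created only to group attributes.  The
 * parent's driver might set one up like this (foo_* names hypothetical):
 *
 *        child = foo_create_child(parent);
 *        pm_runtime_no_callbacks(child);
 *        pm_runtime_set_active(child);
 *        pm_runtime_enable(child);
 *
 * Suspending or resuming the child then only updates its status and the
 * parent's child_count; no callbacks run, and since the device has no
 * run-time PM knobs of its own, its run-time sysfs attributes are removed.
 */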

/**
 * pm_runtime_init - Initialize run-time PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
        dev->power.runtime_status = RPM_SUSPENDED;
        dev->power.idle_notification = false;

        dev->power.disable_depth = 1;
        atomic_set(&dev->power.usage_count, 0);

        dev->power.runtime_error = 0;

        atomic_set(&dev->power.child_count, 0);
        pm_suspend_ignore_children(dev, false);
        dev->power.runtime_auto = true;

        dev->power.request_pending = false;
        dev->power.request = RPM_REQ_NONE;
        dev->power.deferred_resume = false;
        dev->power.accounting_timestamp = jiffies;
        INIT_WORK(&dev->power.work, pm_runtime_work);

        dev->power.timer_expires = 0;
        setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
                        (unsigned long)dev);

        init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
        __pm_runtime_disable(dev, false);

        /* Change the status back to 'suspended' to match the initial status. */
        if (dev->power.runtime_status == RPM_ACTIVE)
                pm_runtime_set_suspended(dev);
}