Make suspend abort reason logging depend on CONFIG_PM_SLEEP
[linux-3.10.git] / kernel / task_work.c
1 #include <linux/spinlock.h>
2 #include <linux/task_work.h>
3 #include <linux/tracehook.h>
4
/*
 * Sentinel installed on ->task_works once the task is past the point of
 * running works at exit.  task_work_add() checks for it and refuses new
 * work with -ESRCH.  Only its address matters; all we need is that its
 * ->next == NULL so the run loop terminates.
 */
static struct callback_head work_exited;
6
7 int
8 task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
9 {
10         struct callback_head *head;
11
12         do {
13                 head = ACCESS_ONCE(task->task_works);
14                 if (unlikely(head == &work_exited))
15                         return -ESRCH;
16                 work->next = head;
17         } while (cmpxchg(&task->task_works, head, work) != head);
18
19         if (notify)
20                 set_notify_resume(task);
21         return 0;
22 }
23
/*
 * task_work_cancel - remove a queued work from @task's work list
 * @task: the task whose list should be searched
 * @func: the callback identifying the entry to remove
 *
 * Walks ->task_works and unlinks the first entry whose ->func matches
 * @func.  Returns the unlinked callback_head (ownership goes back to the
 * caller), or NULL if no matching entry was found.
 *
 * pi_lock is held for the whole walk; task_work_run() spins on this lock
 * (raw_spin_unlock_wait) before it touches the entries it detached, which
 * is what keeps the two from racing on ->next pointers.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work = NULL;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		/* Pair with the list publication; needed on Alpha. */
		read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
48
/*
 * task_work_run - execute all works queued on the current task
 *
 * Called by the current task itself (from tracehook/exit paths).
 * Atomically detaches the whole pending list, then runs the callbacks
 * in FIFO order (the list is stored newest-first, so it is reversed
 * before running).  Loops in case a callback queues more work.
 *
 * If the task is exiting and the list is empty, the work_exited
 * sentinel is installed so that subsequent task_work_add() calls
 * fail with -ESRCH.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		/* Nothing detached: list was empty, we are done. */
		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		/* Reverse the list to run the works in fifo order */
		head = NULL;
		do {
			next = work->next;
			work->next = head;
			head = work;
			work = next;
		} while (work);

		/*
		 * Run each callback; ->next is read before the call
		 * because the callback may free its callback_head.
		 */
		work = head;
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
92 }