#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

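/*
 * task_work: queue a callback to run in the context of a given task, either
 * on its next return to user mode (task_work_add() with notify == true sets
 * TIF_NOTIFY_RESUME via set_notify_resume()) or when the task exits through
 * exit_task_work().
 */
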
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;
	/*
	 * It is the callers' responsibility not to insert new work
	 * after the task has already passed exit_task_work().
	 */
	do {
		/* lock-less push; retry if another add or task_work_run() raced us */
		head = ACCESS_ONCE(task->task_works);
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}

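/*
 * A hypothetical usage sketch, not part of this file: callers typically
 * embed the callback_head in their own structure, initialize it with
 * init_task_work() from <linux/task_work.h>, and queue it with
 * task_work_add().  The callback then runs in the target task's own
 * context.  The names example_work, example_fn and example_queue are
 * illustrative only.
 */
struct example_work {
	struct callback_head twork;	/* must stay allocated until the callback runs */
	int data;
};

static void example_fn(struct callback_head *cb)
{
	struct example_work *ew = container_of(cb, struct example_work, twork);

	/* runs in the queuing target's context, not the queuer's */
	pr_info("task_work ran, data=%d\n", ew->data);
}

static int example_queue(struct task_struct *task, struct example_work *ew, int data)
{
	ew->data = data;
	init_task_work(&ew->twork, example_fn);
	/* notify == true: set TIF_NOTIFY_RESUME so the work runs soon */
	return task_work_add(task, &ew->twork, true);
}
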
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work = NULL;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which inserted a new
	 * entry before this one, in which case we will find it again,
	 * or we raced with task_work_run() and *pprev is now NULL.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

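/*
 * A hypothetical cancellation sketch, following on from example_queue()
 * above: on success task_work_cancel() hands the dequeued callback_head
 * back to the caller, who regains ownership of the embedding object; NULL
 * means no matching entry was found (it may already have run).
 */
static void example_cancel(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, example_fn);

	if (cb) {
		struct example_work *ew = container_of(cb, struct example_work, twork);

		/* example_fn() will not run for this entry; reclaim it here */
		pr_info("cancelled pending work, data=%d\n", ew->data);
	}
}
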
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		work = xchg(&task->task_works, NULL);
		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(): it can't remove
		 * the first entry (== work) because cmpxchg(task_works)
		 * should fail, but it can play with *work and the other
		 * entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		/* Reverse the list to run the works in FIFO order */
		head = NULL;
		do {
			next = work->next;
			work->next = head;
			head = work;
			work = next;
		} while (work);

		work = head;
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
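
/*
 * Call-site sketch: task_work_run() is not called directly by users of the
 * API.  It runs from the notify-resume path (tracehook_notify_resume(),
 * hence the include above) and from exit_task_work() at task exit.
 * example_notify_resume is an illustrative, simplified rendering of that
 * path, not the real entry code.
 */
static void example_notify_resume(void)
{
	/*
	 * The caller has just cleared TIF_NOTIFY_RESUME; the barrier pairs
	 * with set_notify_resume() in task_work_add() so that a freshly
	 * queued work cannot be missed.
	 */
	smp_mb__after_clear_bit();
	if (unlikely(current->task_works))
		task_work_run();
}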