/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */

        int freezeable;         /* Freeze the thread during suspend */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        /*
         * We need to re-validate the work info after we've gotten
         * the cpu_workqueue lock. We can run the work now iff:
         *
         *  - the wq_data still matches the cpu_workqueue_struct
         *  - AND the work is still marked pending
         *  - AND the work is still on a list (which will be this
         *    workqueue_struct list)
         *
         * All these conditions are important, because we
         * need to protect against the work being run right
         * now on another CPU (all but the last one might be
         * true if it's currently running and has not been
         * released yet, for example).
         */
        if (get_wq_data(work) == cwq
            && work_pending(work)
            && !list_empty(&work->entry)) {
                work_func_t f = work->func;
                list_del_init(&work->entry);
                spin_unlock_irqrestore(&cwq->lock, flags);

                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                spin_lock_irqsave(&cwq->lock, flags);
                ret = 1;
        }
        spin_unlock_irqrestore(&cwq->lock, flags);
        return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
        for (;;) {
                struct cpu_workqueue_struct *cwq;

                if (!work_pending(work))
                        return 0;
                if (list_empty(&work->entry))
                        return 0;
                /* NOTE! This depends intimately on __queue_work! */
                cwq = get_wq_data(work);
                if (!cwq)
                        return 0;
                if (__run_work(cwq, work))
                        return 1;
        }
}
EXPORT_SYMBOL(run_scheduled_work);
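
/*
 * Illustrative sketch only (not part of the original file): a caller that
 * earlier did schedule_work() on a plain work_struct can force the pending
 * handler to run synchronously before tearing down the state it touches.
 * "my_work" and "my_handler" are hypothetical names.
 *
 *      static void my_handler(struct work_struct *work);
 *      static DECLARE_WORK(my_work, my_handler);
 *
 *      schedule_work(&my_work);
 *      ...
 *      run_scheduled_work(&my_work);   returns 1 if the handler ran here
 *
 * As the kernel-doc above stresses, this must not be used with a
 * struct delayed_work.
 */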

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        set_wq_data(work, cwq);
        list_add_tail(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
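
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * owning a private workqueue typically declares a handler and a work item
 * and queues it from some event path. "my_wq", "my_work" and
 * "my_work_handler" are hypothetical names.
 *
 *      static void my_work_handler(struct work_struct *work);
 *      static DECLARE_WORK(my_work, my_work_handler);
 *      static struct workqueue_struct *my_wq;
 *
 *      queue_work(my_wq, &my_work);    non-zero if it was not already queued
 *
 * Because the PENDING bit gates the queueing, a second queue_work() while
 * the item is still pending is a no-op rather than a double insertion.
 */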

void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(timer);
        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
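
/*
 * Usage sketch (illustrative only): delayed work pairs a timer with a work
 * item, so callers use struct delayed_work with DECLARE_DELAYED_WORK (or
 * INIT_DELAYED_WORK). "my_wq", "my_dwork" and "my_timeout_handler" are
 * hypothetical names.
 *
 *      static void my_timeout_handler(struct work_struct *work);
 *      static DECLARE_DELAYED_WORK(my_dwork, my_timeout_handler);
 *
 *      queue_delayed_work(my_wq, &my_dwork, HZ / 2);   run in ~0.5s
 *
 * Until the timer fires, the workqueue pointer (not the cpu_workqueue) is
 * stored in the work data, which is why the run_scheduled_work() comment
 * above forbids using that helper on delayed work.
 */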

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irqsave(&cwq->lock, flags);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                if (cwq->freezeable)
                        try_to_freeze();

                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static inline void init_wq_barrier(struct wq_barrier *barr)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                mutex_unlock(&workqueue_mutex);
                run_workqueue(cwq);
                mutex_lock(&workqueue_mutex);
        } else {
                struct wq_barrier barr;

                init_wq_barrier(&barr);
                __queue_work(cwq, &barr.work);

                mutex_unlock(&workqueue_mutex);
                wait_for_completion(&barr.done);
                mutex_lock(&workqueue_mutex);
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
        }
        mutex_unlock(&workqueue_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
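
/*
 * Typical teardown sketch (illustrative only): a driver remove/shutdown
 * path stops producing new work, then flushes before freeing anything the
 * handlers might still touch. "my_wq" and "my_dwork" are hypothetical.
 *
 *      cancel_delayed_work(&my_dwork);         stop a pending timer, if any
 *      flush_workqueue(my_wq);                 wait for queued handlers
 *      destroy_workqueue(my_wq);
 *
 * Flushing from inside a handler running on the same workqueue is handled
 * by the run-it-by-hand branch in flush_cpu_workqueue() above.
 */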

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu, int freezeable)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->freezeable = freezeable;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu, freezeable);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        mutex_unlock(&workqueue_mutex);

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
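
/*
 * Note (illustrative, not part of the original file): callers normally do
 * not invoke __create_workqueue() directly but go through the wrappers
 * declared in <linux/workqueue.h>, e.g. create_workqueue(),
 * create_singlethread_workqueue() and create_freezeable_workqueue(), which
 * fill in the singlethread and freezeable arguments. A hypothetical driver
 * init path:
 *
 *      my_wq = create_singlethread_workqueue("mydrv");
 *      if (!my_wq)
 *              return -ENOMEM;
 *
 * The singlethread variant keeps the queue off the global workqueues list
 * and runs everything on singlethread_cpu, which is what
 * is_single_threaded() above relies on.
 */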

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                list_del(&wq->list);
        }
        mutex_unlock(&workqueue_mutex);
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
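
/*
 * Usage sketch (illustrative only): the schedule_*() helpers are the same
 * operations aimed at the shared keventd queue, so no private workqueue
 * needs to be created. "my_work" and "my_dwork" are hypothetical items.
 *
 *      schedule_work(&my_work);                        run soon, any CPU
 *      schedule_delayed_work(&my_dwork, 5 * HZ);       run in ~5 seconds
 *      schedule_delayed_work_on(0, &my_dwork, HZ);     queue on CPU 0
 *
 * Handlers queued this way share the "events/N" threads with the rest of
 * the kernel, so they should not block for long periods.
 */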

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
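
/*
 * Usage sketch (illustrative only): schedule_on_each_cpu() is a synchronous
 * broadcast - it queues one work item per online CPU and the
 * flush_workqueue() above waits for all of them. A hypothetical per-cpu
 * cache drain could look like:
 *
 *      static void drain_local_cache(struct work_struct *unused)
 *      {
 *              ...     operate on this CPU's per-cpu data
 *      }
 *
 *      if (schedule_on_each_cpu(drain_local_cache))
 *              ...     -ENOMEM, nothing was queued
 *
 * As the kernel-doc above warns, this is slow and racy against CPU hotplug.
 */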

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
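
/*
 * Usage sketch (illustrative only): a handler that rearms itself cannot be
 * stopped reliably with a single cancel_delayed_work(), because its timer
 * may already have fired and the work may be queued or running.
 * "my_dwork" and "my_poll" are hypothetical names.
 *
 *      static void my_poll(struct work_struct *work)
 *      {
 *              ...
 *              schedule_delayed_work(&my_dwork, HZ);   rearm
 *      }
 *
 *      At module exit:
 *              cancel_rearming_delayed_work(&my_dwork);
 *
 * The cancel/flush loop above covers that window: the flush waits for the
 * queued handler, after which the rearmed timer is pending again and the
 * next cancel_delayed_work() can delete it.
 */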

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
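
/*
 * Usage sketch (illustrative only): this helper suits release-type
 * callbacks that may run from either process or interrupt context. The
 * execute_work storage must outlive the call, so it is usually embedded in
 * the object being released. "struct my_dev" and its "ew" member are
 * hypothetical.
 *
 *      static void my_release(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev, ew.work);
 *              kfree(dev);
 *      }
 *
 *      execute_in_process_context(my_release, &dev->ew);
 *
 * A return of 0 means the function already ran; 1 means it was deferred to
 * keventd.
 */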

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;

}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu, 0)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DOWN_PREPARE:
                mutex_lock(&workqueue_mutex);
                break;

        case CPU_DOWN_FAILED:
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                mutex_unlock(&workqueue_mutex);
                break;
        }

        return NOTIFY_OK;
}

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}