/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: cwq->lock protected.  Access with cwq->lock held.
 *
 * W: workqueue_lock protected.
 */

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;		/* I: the owning workqueue */
	struct task_struct	*thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline bool is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->flags & WQ_SINGLE_THREAD;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}

/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));
	insert_work(cwq, work, &cwq->worklist, 0);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
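
/*
 * Illustrative sketch, not part of the original file: how a caller would
 * typically declare a work item and hand it to queue_work().  The names
 * example_dev, example_reset_fn and example_request_reset are hypothetical.
 */
struct example_dev {
	struct workqueue_struct	*wq;
	struct work_struct	reset_work;
};

static void example_reset_fn(struct work_struct *work)
{
	struct example_dev *ed = container_of(work, struct example_dev,
					      reset_work);

	/* runs in process context on one of ed->wq's worker threads */
	(void)ed;
}

static void __maybe_unused example_request_reset(struct example_dev *ed)
{
	INIT_WORK(&ed->reset_work, example_reset_fn);
	/* returns 0 if the work was already pending, non-zero otherwise */
	if (!queue_work(ed->wq, &ed->reset_work))
		pr_debug("reset already pending\n");
}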

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
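
/*
 * Illustrative sketch, not part of the original file: deferring a
 * hypothetical polling routine with queue_delayed_work().  example_wq,
 * example_poll, example_poll_work and example_start_polling are made-up
 * names.
 */
static struct workqueue_struct *example_wq;

static void example_poll(struct work_struct *work)
{
	/* do the periodic job in process context */
}
static DECLARE_DELAYED_WORK(example_poll_work, example_poll);

static void __maybe_unused example_start_polling(void)
{
	/* run example_poll() on example_wq roughly one second from now */
	queue_delayed_work(example_wq, &example_poll_work, HZ);
}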

/**
 * process_one_work - process single work
 * @cwq: cwq to process work for
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct cpu_workqueue_struct *cwq,
			     struct work_struct *work)
{
	work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	cwq->current_work = work;
	list_del_init(&work->entry);

	spin_unlock_irq(&cwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&cwq->lock);

	/* we're done with it, release */
	cwq->current_work = NULL;
}

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		process_one_work(cwq, work);
	}
	spin_unlock_irq(&cwq->lock);
}

/**
 * worker_thread - the worker thread function
 * @__cwq: cwq to serve
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->flags & WQ_FREEZEABLE)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @head: insertion point
 *
 * Insert barrier @barr into @cwq before @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head, 0);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto already_gone;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);

	spin_unlock_irq(&cwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&cwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(get_cwq(cpu, wq), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work to cancel
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
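
/*
 * Illustrative sketch, not part of the original file: the usual teardown
 * sequence built on the cancel helpers above.  example_cb, example_work,
 * example_dwork and example_stop are hypothetical.  Once the calls below
 * return, the callback is neither queued nor running, so backing memory
 * may be freed safely.
 */
static void example_cb(struct work_struct *work)
{
}
static DECLARE_WORK(example_work, example_cb);
static DECLARE_DELAYED_WORK(example_dwork, example_cb);

static void __maybe_unused example_stop(void)
{
	/* kills the timer if pending and waits for a running callback */
	cancel_delayed_work_sync(&example_dwork);
	/* same guarantee for a plain work item */
	cancel_work_sync(&example_work);
}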

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a delayed_work's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
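
/*
 * Illustrative sketch, not part of the original file: a self-rearming
 * housekeeping job on the kernel-global workqueue.  example_heartbeat and
 * example_heartbeat_work are hypothetical; such a work item is normally
 * stopped with cancel_delayed_work_sync().
 */
static void example_heartbeat(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_heartbeat_work, example_heartbeat);

static void example_heartbeat(struct work_struct *work)
{
	/* do the periodic job, then requeue ourselves in five seconds */
	schedule_delayed_work(&example_heartbeat_work, 5 * HZ);
}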

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
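
/*
 * Illustrative sketch, not part of the original file: calling
 * execute_in_process_context() from a path that may or may not be atomic.
 * example_release_ew, example_do_release and example_release are
 * hypothetical.
 */
static struct execute_work example_release_ew;

static void example_do_release(struct work_struct *work)
{
	/* always reached in process context, directly or via keventd */
}

static void __maybe_unused example_release(void)
{
	/* runs the routine immediately if we may sleep, otherwise defers it */
	execute_in_process_context(example_do_release, &example_release_ew);
}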

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	if (flags & WQ_SINGLE_THREAD) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_percpu(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
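
/*
 * Illustrative sketch, not part of the original file: drivers normally reach
 * __create_workqueue_key() through the create_workqueue() and
 * create_singlethread_workqueue() wrappers.  example_queue_var,
 * example_wq_init and example_wq_exit are hypothetical.
 */
static struct workqueue_struct *example_queue_var;

static int __maybe_unused example_wq_init(void)
{
	/* one worker thread per CPU; use create_singlethread_workqueue()
	 * when a single thread is enough */
	example_queue_var = create_workqueue("example");
	if (!example_queue_var)
		return -ENOMEM;
	return 0;
}

static void __maybe_unused example_wq_exit(void)
{
	/* flushes pending work, stops the worker threads, frees the wq */
	destroy_workqueue(example_queue_var);
}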

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int err = 0;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			err = create_workqueue_thread(cwq, cpu);
			if (!err)
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			err = -ENOMEM;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return notifier_from_errno(err);
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */

void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}