workqueue: change cancel_work_sync() to clear work->data
1 /*
2  * linux/kernel/workqueue.c
3  *
4  * Generic mechanism for defining kernel helper threads for running
5  * arbitrary tasks in process context.
6  *
7  * Started by Ingo Molnar, Copyright (C) 2002
8  *
9  * Derived from the taskqueue/keventd code by:
10  *
11  *   David Woodhouse <dwmw2@infradead.org>
12  *   Andrew Morton
13  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
14  *   Theodore Ts'o <tytso@mit.edu>
15  *
16  * Made to use alloc_percpu by Christoph Lameter.
17  */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35 #include <linux/lockdep.h>
36 #define CREATE_TRACE_POINTS
37 #include <trace/events/workqueue.h>
38
39 /*
40  * The per-CPU workqueue (if single thread, we always use the first
41  * possible cpu).
42  */
43 struct cpu_workqueue_struct {
44
45         spinlock_t lock;
46
47         struct list_head worklist;
48         wait_queue_head_t more_work;
49         struct work_struct *current_work;
50
51         struct workqueue_struct *wq;
52         struct task_struct *thread;
53 } ____cacheline_aligned;
54
55 /*
56  * The externally visible workqueue abstraction is an array of
57  * per-CPU workqueues:
58  */
59 struct workqueue_struct {
60         struct cpu_workqueue_struct *cpu_wq;
61         struct list_head list;
62         const char *name;
63         int singlethread;
64         int freezeable;         /* Freeze threads during suspend */
65         int rt;
66 #ifdef CONFIG_LOCKDEP
67         struct lockdep_map lockdep_map;
68 #endif
69 };
70
71 #ifdef CONFIG_DEBUG_OBJECTS_WORK
72
73 static struct debug_obj_descr work_debug_descr;
74
75 /*
76  * fixup_init is called when:
77  * - an active object is initialized
78  */
79 static int work_fixup_init(void *addr, enum debug_obj_state state)
80 {
81         struct work_struct *work = addr;
82
83         switch (state) {
84         case ODEBUG_STATE_ACTIVE:
85                 cancel_work_sync(work);
86                 debug_object_init(work, &work_debug_descr);
87                 return 1;
88         default:
89                 return 0;
90         }
91 }
92
93 /*
94  * fixup_activate is called when:
95  * - an active object is activated
96  * - an unknown object is activated (might be a statically initialized object)
97  */
98 static int work_fixup_activate(void *addr, enum debug_obj_state state)
99 {
100         struct work_struct *work = addr;
101
102         switch (state) {
103
104         case ODEBUG_STATE_NOTAVAILABLE:
105                 /*
106                  * This is not really a fixup. The work struct was
107                  * statically initialized. We just make sure that it
108                  * is tracked in the object tracker.
109                  */
110                 if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
111                         debug_object_init(work, &work_debug_descr);
112                         debug_object_activate(work, &work_debug_descr);
113                         return 0;
114                 }
115                 WARN_ON_ONCE(1);
116                 return 0;
117
118         case ODEBUG_STATE_ACTIVE:
119                 WARN_ON(1);
120
121         default:
122                 return 0;
123         }
124 }
125
126 /*
127  * fixup_free is called when:
128  * - an active object is freed
129  */
130 static int work_fixup_free(void *addr, enum debug_obj_state state)
131 {
132         struct work_struct *work = addr;
133
134         switch (state) {
135         case ODEBUG_STATE_ACTIVE:
136                 cancel_work_sync(work);
137                 debug_object_free(work, &work_debug_descr);
138                 return 1;
139         default:
140                 return 0;
141         }
142 }
143
144 static struct debug_obj_descr work_debug_descr = {
145         .name           = "work_struct",
146         .fixup_init     = work_fixup_init,
147         .fixup_activate = work_fixup_activate,
148         .fixup_free     = work_fixup_free,
149 };
150
151 static inline void debug_work_activate(struct work_struct *work)
152 {
153         debug_object_activate(work, &work_debug_descr);
154 }
155
156 static inline void debug_work_deactivate(struct work_struct *work)
157 {
158         debug_object_deactivate(work, &work_debug_descr);
159 }
160
161 void __init_work(struct work_struct *work, int onstack)
162 {
163         if (onstack)
164                 debug_object_init_on_stack(work, &work_debug_descr);
165         else
166                 debug_object_init(work, &work_debug_descr);
167 }
168 EXPORT_SYMBOL_GPL(__init_work);
169
170 void destroy_work_on_stack(struct work_struct *work)
171 {
172         debug_object_free(work, &work_debug_descr);
173 }
174 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
175
176 #else
177 static inline void debug_work_activate(struct work_struct *work) { }
178 static inline void debug_work_deactivate(struct work_struct *work) { }
179 #endif
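
/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * would pair the on-stack helpers above.  INIT_WORK_ON_STACK() and
 * destroy_work_on_stack() fall back to no-ops in <linux/workqueue.h> when
 * CONFIG_DEBUG_OBJECTS_WORK is off.  All names below are hypothetical.
 */
static void example_onstack_fn(struct work_struct *work)
{
        /* runs later in keventd process context */
}

static void example_run_onstack_work(void)
{
        struct work_struct work;

        INIT_WORK_ON_STACK(&work, example_onstack_fn); /* __init_work(.., 1) */
        schedule_work(&work);
        flush_work(&work);             /* must finish before the stack frame dies */
        destroy_work_on_stack(&work);  /* pairs with INIT_WORK_ON_STACK() */
}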
180
181 /* Serializes the accesses to the list of workqueues. */
182 static DEFINE_SPINLOCK(workqueue_lock);
183 static LIST_HEAD(workqueues);
184
185 static int singlethread_cpu __read_mostly;
186 static const struct cpumask *cpu_singlethread_map __read_mostly;
187 /*
188  * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
189  * flushes cwq->worklist. This means that a flush_workqueue/wait_on_work
190  * that comes in between can't use for_each_online_cpu(). We could
191  * use cpu_possible_map instead; the cpumask below is more
192  * documentation than an optimization.
193  */
194 static cpumask_var_t cpu_populated_map __read_mostly;
195
196 /* If it's single threaded, it isn't in the list of workqueues. */
197 static inline int is_wq_single_threaded(struct workqueue_struct *wq)
198 {
199         return wq->singlethread;
200 }
201
202 static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
203 {
204         return is_wq_single_threaded(wq)
205                 ? cpu_singlethread_map : cpu_populated_map;
206 }
207
208 static
209 struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
210 {
211         if (unlikely(is_wq_single_threaded(wq)))
212                 cpu = singlethread_cpu;
213         return per_cpu_ptr(wq->cpu_wq, cpu);
214 }
215
216 /*
217  * Set the workqueue on which a work item is to be run
218  * - Must *only* be called if the pending flag is set
219  */
220 static inline void set_wq_data(struct work_struct *work,
221                                 struct cpu_workqueue_struct *cwq)
222 {
223         unsigned long new;
224
225         BUG_ON(!work_pending(work));
226
227         new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
228         new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
229         atomic_long_set(&work->data, new);
230 }
231
232 /*
233  * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
234  */
235 static inline void clear_wq_data(struct work_struct *work)
236 {
237         unsigned long flags = *work_data_bits(work) &
238                                 (1UL << WORK_STRUCT_STATIC);
239         atomic_long_set(&work->data, flags);
240 }
241
242 static inline
243 struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
244 {
245         return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
246 }
247
248 static void insert_work(struct cpu_workqueue_struct *cwq,
249                         struct work_struct *work, struct list_head *head)
250 {
251         trace_workqueue_insertion(cwq->thread, work);
252
253         set_wq_data(work, cwq);
254         /*
255          * Ensure that we get the right work->data if we see the
256          * result of list_add() below, see try_to_grab_pending().
257          */
258         smp_wmb();
259         list_add_tail(&work->entry, head);
260         wake_up(&cwq->more_work);
261 }
262
263 static void __queue_work(struct cpu_workqueue_struct *cwq,
264                          struct work_struct *work)
265 {
266         unsigned long flags;
267
268         debug_work_activate(work);
269         spin_lock_irqsave(&cwq->lock, flags);
270         insert_work(cwq, work, &cwq->worklist);
271         spin_unlock_irqrestore(&cwq->lock, flags);
272 }
273
274 /**
275  * queue_work - queue work on a workqueue
276  * @wq: workqueue to use
277  * @work: work to queue
278  *
279  * Returns 0 if @work was already on a queue, non-zero otherwise.
280  *
281  * We queue the work to the CPU on which it was submitted, but if the CPU dies
282  * it can be processed by another CPU.
283  */
284 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
285 {
286         int ret;
287
288         ret = queue_work_on(get_cpu(), wq, work);
289         put_cpu();
290
291         return ret;
292 }
293 EXPORT_SYMBOL_GPL(queue_work);
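
/*
 * Illustrative usage sketch, not part of the original file: a driver that
 * owns its own workqueue and queues a work item from, say, its interrupt
 * handler.  All names are hypothetical; error handling is trimmed.
 */
struct example_dev {
        struct workqueue_struct *wq;
        struct work_struct      rx_work;
};

static void example_rx_work_fn(struct work_struct *work)
{
        struct example_dev *dev = container_of(work, struct example_dev, rx_work);

        /* process dev->... ; this runs in sleepable process context */
}

static int example_dev_setup(struct example_dev *dev)
{
        dev->wq = create_workqueue("example_rx");  /* macro from <linux/workqueue.h> */
        if (!dev->wq)
                return -ENOMEM;
        INIT_WORK(&dev->rx_work, example_rx_work_fn);
        return 0;
}

static void example_dev_irq(struct example_dev *dev)
{
        /* safe from atomic context; returns 0 if rx_work was already pending */
        queue_work(dev->wq, &dev->rx_work);
}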
294
295 /**
296  * queue_work_on - queue work on specific cpu
297  * @cpu: CPU number to execute work on
298  * @wq: workqueue to use
299  * @work: work to queue
300  *
301  * Returns 0 if @work was already on a queue, non-zero otherwise.
302  *
303  * We queue the work to a specific CPU; the caller must ensure that
304  * the CPU can't go away.
305  */
306 int
307 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
308 {
309         int ret = 0;
310
311         if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
312                 BUG_ON(!list_empty(&work->entry));
313                 __queue_work(wq_per_cpu(wq, cpu), work);
314                 ret = 1;
315         }
316         return ret;
317 }
318 EXPORT_SYMBOL_GPL(queue_work_on);
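
/*
 * Illustrative usage sketch, not part of the original file: pinning a work
 * item to one CPU, e.g. because the handler touches that CPU's private
 * data.  Keeping the CPU online is the caller's job, done here with
 * get_online_cpus().  All names are hypothetical.
 */
static void example_percpu_fn(struct work_struct *work)
{
        /* runs on the CPU it was queued on (unless that CPU goes down) */
}

static DECLARE_WORK(example_percpu_work, example_percpu_fn);

static void example_kick_cpu(int cpu, struct workqueue_struct *wq)
{
        get_online_cpus();
        if (cpu_online(cpu))
                queue_work_on(cpu, wq, &example_percpu_work);
        put_online_cpus();
}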
319
320 static void delayed_work_timer_fn(unsigned long __data)
321 {
322         struct delayed_work *dwork = (struct delayed_work *)__data;
323         struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
324         struct workqueue_struct *wq = cwq->wq;
325
326         __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
327 }
328
329 /**
330  * queue_delayed_work - queue work on a workqueue after delay
331  * @wq: workqueue to use
332  * @dwork: delayable work to queue
333  * @delay: number of jiffies to wait before queueing
334  *
335  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
336  */
337 int queue_delayed_work(struct workqueue_struct *wq,
338                         struct delayed_work *dwork, unsigned long delay)
339 {
340         if (delay == 0)
341                 return queue_work(wq, &dwork->work);
342
343         return queue_delayed_work_on(-1, wq, dwork, delay);
344 }
345 EXPORT_SYMBOL_GPL(queue_delayed_work);
346
347 /**
348  * queue_delayed_work_on - queue work on specific CPU after delay
349  * @cpu: CPU number to execute work on
350  * @wq: workqueue to use
351  * @dwork: work to queue
352  * @delay: number of jiffies to wait before queueing
353  *
354  * Returns 0 if @dwork was already on a queue, non-zero otherwise.
355  */
356 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
357                         struct delayed_work *dwork, unsigned long delay)
358 {
359         int ret = 0;
360         struct timer_list *timer = &dwork->timer;
361         struct work_struct *work = &dwork->work;
362
363         if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
364                 BUG_ON(timer_pending(timer));
365                 BUG_ON(!list_empty(&work->entry));
366
367                 timer_stats_timer_set_start_info(&dwork->timer);
368
369                 /* This stores cwq for the moment, for the timer_fn */
370                 set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
371                 timer->expires = jiffies + delay;
372                 timer->data = (unsigned long)dwork;
373                 timer->function = delayed_work_timer_fn;
374
375                 if (unlikely(cpu >= 0))
376                         add_timer_on(timer, cpu);
377                 else
378                         add_timer(timer);
379                 ret = 1;
380         }
381         return ret;
382 }
383 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
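
/*
 * Illustrative usage sketch, not part of the original file: queueing
 * delayed work, either on whatever CPU the timer fires on or pinned to a
 * given CPU.  The 100ms interval and all names are hypothetical.
 */
struct example_poller {
        struct workqueue_struct *wq;
        struct delayed_work     poll;
};

static void example_poll_fn(struct work_struct *work)
{
        struct example_poller *p =
                container_of(work, struct example_poller, poll.work);

        /* poll the hardware; may re-arm itself with queue_delayed_work() */
}

static void example_start_polling(struct example_poller *p, int cpu)
{
        INIT_DELAYED_WORK(&p->poll, example_poll_fn);

        if (cpu < 0)    /* delayed_work_timer_fn queues on the CPU the timer fires on */
                queue_delayed_work(p->wq, &p->poll, msecs_to_jiffies(100));
        else            /* the caller must keep @cpu online */
                queue_delayed_work_on(cpu, p->wq, &p->poll, msecs_to_jiffies(100));
}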
384
385 static void run_workqueue(struct cpu_workqueue_struct *cwq)
386 {
387         spin_lock_irq(&cwq->lock);
388         while (!list_empty(&cwq->worklist)) {
389                 struct work_struct *work = list_entry(cwq->worklist.next,
390                                                 struct work_struct, entry);
391                 work_func_t f = work->func;
392 #ifdef CONFIG_LOCKDEP
393                 /*
394                  * It is permissible to free the struct work_struct
395                  * from inside the function that is called from it;
396                  * we need to take this into account for lockdep too.
397                  * To avoid bogus "held lock freed" warnings as well
398                  * as problems when looking into work->lockdep_map,
399                  * make a copy and use that here.
400                  */
401                 struct lockdep_map lockdep_map = work->lockdep_map;
402 #endif
403                 trace_workqueue_execution(cwq->thread, work);
404                 debug_work_deactivate(work);
405                 cwq->current_work = work;
406                 list_del_init(cwq->worklist.next);
407                 spin_unlock_irq(&cwq->lock);
408
409                 BUG_ON(get_wq_data(work) != cwq);
410                 work_clear_pending(work);
411                 lock_map_acquire(&cwq->wq->lockdep_map);
412                 lock_map_acquire(&lockdep_map);
413                 f(work);
414                 lock_map_release(&lockdep_map);
415                 lock_map_release(&cwq->wq->lockdep_map);
416
417                 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
418                         printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
419                                         "%s/0x%08x/%d\n",
420                                         current->comm, preempt_count(),
421                                         task_pid_nr(current));
422                         printk(KERN_ERR "    last function: ");
423                         print_symbol("%s\n", (unsigned long)f);
424                         debug_show_held_locks(current);
425                         dump_stack();
426                 }
427
428                 spin_lock_irq(&cwq->lock);
429                 cwq->current_work = NULL;
430         }
431         spin_unlock_irq(&cwq->lock);
432 }
433
434 static int worker_thread(void *__cwq)
435 {
436         struct cpu_workqueue_struct *cwq = __cwq;
437         DEFINE_WAIT(wait);
438
439         if (cwq->wq->freezeable)
440                 set_freezable();
441
442         for (;;) {
443                 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
444                 if (!freezing(current) &&
445                     !kthread_should_stop() &&
446                     list_empty(&cwq->worklist))
447                         schedule();
448                 finish_wait(&cwq->more_work, &wait);
449
450                 try_to_freeze();
451
452                 if (kthread_should_stop())
453                         break;
454
455                 run_workqueue(cwq);
456         }
457
458         return 0;
459 }
460
461 struct wq_barrier {
462         struct work_struct      work;
463         struct completion       done;
464 };
465
466 static void wq_barrier_func(struct work_struct *work)
467 {
468         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
469         complete(&barr->done);
470 }
471
472 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
473                         struct wq_barrier *barr, struct list_head *head)
474 {
475         /*
476          * debugobject calls are safe here even with cwq->lock locked
477          * as we know for sure that this will not trigger any of the
478          * checks and call back into the fixup functions where we
479          * might deadlock.
480          */
481         INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
482         __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
483
484         init_completion(&barr->done);
485
486         debug_work_activate(&barr->work);
487         insert_work(cwq, &barr->work, head);
488 }
489
490 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
491 {
492         int active = 0;
493         struct wq_barrier barr;
494
495         WARN_ON(cwq->thread == current);
496
497         spin_lock_irq(&cwq->lock);
498         if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
499                 insert_wq_barrier(cwq, &barr, &cwq->worklist);
500                 active = 1;
501         }
502         spin_unlock_irq(&cwq->lock);
503
504         if (active) {
505                 wait_for_completion(&barr.done);
506                 destroy_work_on_stack(&barr.work);
507         }
508
509         return active;
510 }
511
512 /**
513  * flush_workqueue - ensure that any scheduled work has run to completion.
514  * @wq: workqueue to flush
515  *
516  * Forces execution of the workqueue and blocks until its completion.
517  * This is typically used in driver shutdown handlers.
518  *
519  * We sleep until all work items which were queued on entry have been handled,
520  * but we are not livelocked by new incoming ones.
521  *
522  * This function used to run the workqueues itself.  Now we just wait for the
523  * helper threads to do it.
524  */
525 void flush_workqueue(struct workqueue_struct *wq)
526 {
527         const struct cpumask *cpu_map = wq_cpu_map(wq);
528         int cpu;
529
530         might_sleep();
531         lock_map_acquire(&wq->lockdep_map);
532         lock_map_release(&wq->lockdep_map);
533         for_each_cpu(cpu, cpu_map)
534                 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
535 }
536 EXPORT_SYMBOL_GPL(flush_workqueue);
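
/*
 * Illustrative usage sketch, not part of the original file: a typical
 * shutdown ordering around flush_workqueue(), reusing the hypothetical
 * example_dev from the sketch above.
 */
static void example_dev_shutdown(struct example_dev *dev)
{
        /* 1. stop the sources of new work first (IRQs, timers, ...) */
        /* 2. wait for everything that is already queued to finish */
        flush_workqueue(dev->wq);
        /* 3. only now tear down the state the work functions were using */
        destroy_workqueue(dev->wq);
}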
537
538 /**
539  * flush_work - block until a work_struct's callback has terminated
540  * @work: the work which is to be flushed
541  *
542  * Returns false if @work has already terminated.
543  *
544  * It is expected that, prior to calling flush_work(), the caller has
545  * arranged for the work to not be requeued, otherwise it doesn't make
546  * sense to use this function.
547  */
548 int flush_work(struct work_struct *work)
549 {
550         struct cpu_workqueue_struct *cwq;
551         struct list_head *prev;
552         struct wq_barrier barr;
553
554         might_sleep();
555         cwq = get_wq_data(work);
556         if (!cwq)
557                 return 0;
558
559         lock_map_acquire(&cwq->wq->lockdep_map);
560         lock_map_release(&cwq->wq->lockdep_map);
561
562         prev = NULL;
563         spin_lock_irq(&cwq->lock);
564         if (!list_empty(&work->entry)) {
565                 /*
566                  * See the comment near try_to_grab_pending()->smp_rmb().
567                  * If it was re-queued under us we are not going to wait.
568                  */
569                 smp_rmb();
570                 if (unlikely(cwq != get_wq_data(work)))
571                         goto out;
572                 prev = &work->entry;
573         } else {
574                 if (cwq->current_work != work)
575                         goto out;
576                 prev = &cwq->worklist;
577         }
578         insert_wq_barrier(cwq, &barr, prev->next);
579 out:
580         spin_unlock_irq(&cwq->lock);
581         if (!prev)
582                 return 0;
583
584         wait_for_completion(&barr.done);
585         destroy_work_on_stack(&barr.work);
586         return 1;
587 }
588 EXPORT_SYMBOL_GPL(flush_work);
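
/*
 * Illustrative usage sketch, not part of the original file: waiting for a
 * single work item rather than a whole workqueue, again with the
 * hypothetical example_dev.  The caller has already made sure the item
 * cannot be re-queued, as required above.
 */
static void example_wait_for_rx(struct example_dev *dev)
{
        if (flush_work(&dev->rx_work))
                pr_debug("example: rx_work was still pending or running\n");
}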
589
590 /*
591  * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING bit,
592  * so this work can't be re-armed in any way.
593  */
594 static int try_to_grab_pending(struct work_struct *work)
595 {
596         struct cpu_workqueue_struct *cwq;
597         int ret = -1;
598
599         if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
600                 return 0;
601
602         /*
603          * The queueing is in progress, or it is already queued. Try to
604          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
605          */
606
607         cwq = get_wq_data(work);
608         if (!cwq)
609                 return ret;
610
611         spin_lock_irq(&cwq->lock);
612         if (!list_empty(&work->entry)) {
613                 /*
614                  * This work is queued, but perhaps we locked the wrong cwq.
615                  * In that case we must see the new value after rmb(), see
616                  * insert_work()->wmb().
617                  */
618                 smp_rmb();
619                 if (cwq == get_wq_data(work)) {
620                         debug_work_deactivate(work);
621                         list_del_init(&work->entry);
622                         ret = 1;
623                 }
624         }
625         spin_unlock_irq(&cwq->lock);
626
627         return ret;
628 }
629
630 static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
631                                 struct work_struct *work)
632 {
633         struct wq_barrier barr;
634         int running = 0;
635
636         spin_lock_irq(&cwq->lock);
637         if (unlikely(cwq->current_work == work)) {
638                 insert_wq_barrier(cwq, &barr, cwq->worklist.next);
639                 running = 1;
640         }
641         spin_unlock_irq(&cwq->lock);
642
643         if (unlikely(running)) {
644                 wait_for_completion(&barr.done);
645                 destroy_work_on_stack(&barr.work);
646         }
647 }
648
649 static void wait_on_work(struct work_struct *work)
650 {
651         struct cpu_workqueue_struct *cwq;
652         struct workqueue_struct *wq;
653         const struct cpumask *cpu_map;
654         int cpu;
655
656         might_sleep();
657
658         lock_map_acquire(&work->lockdep_map);
659         lock_map_release(&work->lockdep_map);
660
661         cwq = get_wq_data(work);
662         if (!cwq)
663                 return;
664
665         wq = cwq->wq;
666         cpu_map = wq_cpu_map(wq);
667
668         for_each_cpu(cpu, cpu_map)
669                 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
670 }
671
672 static int __cancel_work_timer(struct work_struct *work,
673                                 struct timer_list* timer)
674 {
675         int ret;
676
677         do {
678                 ret = (timer && likely(del_timer(timer)));
679                 if (!ret)
680                         ret = try_to_grab_pending(work);
681                 wait_on_work(work);
682         } while (unlikely(ret < 0));
683
684         clear_wq_data(work);
685         return ret;
686 }
687
688 /**
689  * cancel_work_sync - block until a work_struct's callback has terminated
690  * @work: the work which is to be flushed
691  *
692  * Returns true if @work was pending.
693  *
694  * cancel_work_sync() will cancel the work if it is queued. If the work's
695  * callback appears to be running, cancel_work_sync() will block until it
696  * has completed.
697  *
698  * It is possible to use this function if the work re-queues itself. It can
699  * cancel the work even if it migrates to another workqueue, however in that
700  * case it only guarantees that work->func() has completed on the last queued
701  * workqueue.
702  *
703  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
704  * pending, otherwise it goes into a busy-wait loop until the timer expires.
705  *
706  * The caller must ensure that workqueue_struct on which this work was last
707  * queued can't be destroyed before this function returns.
708  */
709 int cancel_work_sync(struct work_struct *work)
710 {
711         return __cancel_work_timer(work, NULL);
712 }
713 EXPORT_SYMBOL_GPL(cancel_work_sync);
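
/*
 * Illustrative usage sketch, not part of the original file: the usual
 * device-removal pattern with the hypothetical example_dev.  After
 * cancel_work_sync() returns, the handler can no longer be queued or
 * running, so freeing the device is safe.
 */
static void example_dev_remove(struct example_dev *dev)
{
        /* the interrupt source is already shut down, so nothing re-arms rx_work */
        cancel_work_sync(&dev->rx_work);
        destroy_workqueue(dev->wq);
        kfree(dev);
}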
714
715 /**
716  * cancel_delayed_work_sync - reliably kill off a delayed work.
717  * @dwork: the delayed work struct
718  *
719  * Returns true if @dwork was pending.
720  *
721  * It is possible to use this function if @dwork rearms itself via queue_work()
722  * or queue_delayed_work(). See also the comment for cancel_work_sync().
723  */
724 int cancel_delayed_work_sync(struct delayed_work *dwork)
725 {
726         return __cancel_work_timer(&dwork->work, &dwork->timer);
727 }
728 EXPORT_SYMBOL(cancel_delayed_work_sync);
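
/*
 * Illustrative usage sketch, not part of the original file: a delayed work
 * item that re-arms itself, and the matching synchronous cancel.  The one
 * second period and all names are hypothetical.
 */
static void example_heartbeat_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_heartbeat, example_heartbeat_fn);

static void example_heartbeat_fn(struct work_struct *work)
{
        /* ... periodic housekeeping ... */
        schedule_delayed_work(&example_heartbeat, HZ);  /* re-arm */
}

static void example_stop_heartbeat(void)
{
        /* kills the timer, any queued instance and any running callback */
        cancel_delayed_work_sync(&example_heartbeat);
}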
729
730 static struct workqueue_struct *keventd_wq __read_mostly;
731
732 /**
733  * schedule_work - put work task in global workqueue
734  * @work: job to be done
735  *
736  * Returns zero if @work was already on the kernel-global workqueue and
737  * non-zero otherwise.
738  *
739  * This puts a job in the kernel-global workqueue if it was not already
740  * queued and leaves it in the same position on the kernel-global
741  * workqueue otherwise.
742  */
743 int schedule_work(struct work_struct *work)
744 {
745         return queue_work(keventd_wq, work);
746 }
747 EXPORT_SYMBOL(schedule_work);
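
/*
 * Illustrative usage sketch, not part of the original file: using the
 * kernel-global workqueue for occasional, quick work instead of creating a
 * private one.  All names are hypothetical.
 */
struct example_led {
        struct work_struct      blink_work;
        int                     state;
};

static void example_blink_fn(struct work_struct *work)
{
        struct example_led *led = container_of(work, struct example_led, blink_work);

        led->state = !led->state;
        /* ... program the hardware; may sleep ... */
}

static void example_led_trigger(struct example_led *led)
{
        /* INIT_WORK(&led->blink_work, example_blink_fn) was done at probe time */
        schedule_work(&led->blink_work);
}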
748
749 /**
750  * schedule_work_on - put work task on a specific cpu
751  * @cpu: cpu to put the work task on
752  * @work: job to be done
753  *
754  * This puts a job on a specific cpu.
755  */
756 int schedule_work_on(int cpu, struct work_struct *work)
757 {
758         return queue_work_on(cpu, keventd_wq, work);
759 }
760 EXPORT_SYMBOL(schedule_work_on);
761
762 /**
763  * schedule_delayed_work - put work task in global workqueue after delay
764  * @dwork: job to be done
765  * @delay: number of jiffies to wait or 0 for immediate execution
766  *
767  * After waiting for a given time this puts a job in the kernel-global
768  * workqueue.
769  */
770 int schedule_delayed_work(struct delayed_work *dwork,
771                                         unsigned long delay)
772 {
773         return queue_delayed_work(keventd_wq, dwork, delay);
774 }
775 EXPORT_SYMBOL(schedule_delayed_work);
776
777 /**
778  * flush_delayed_work - block until a dwork_struct's callback has terminated
779  * @dwork: the delayed work which is to be flushed
780  *
781  * Any timeout is cancelled, and any pending work is run immediately.
782  */
783 void flush_delayed_work(struct delayed_work *dwork)
784 {
785         if (del_timer_sync(&dwork->timer)) {
786                 struct cpu_workqueue_struct *cwq;
787                 cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
788                 __queue_work(cwq, &dwork->work);
789                 put_cpu();
790         }
791         flush_work(&dwork->work);
792 }
793 EXPORT_SYMBOL(flush_delayed_work);
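
/*
 * Illustrative usage sketch, not part of the original file: forcing a
 * pending delayed work to run now instead of waiting for its timer, e.g.
 * before reporting fresh state to user space.  It reuses the hypothetical
 * example_heartbeat declared in an earlier sketch.
 */
static void example_sync_heartbeat_now(void)
{
        /* cancels the timer, runs the work immediately and waits for it */
        flush_delayed_work(&example_heartbeat);
}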
794
795 /**
796  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
797  * @cpu: cpu to use
798  * @dwork: job to be done
799  * @delay: number of jiffies to wait
800  *
801  * After waiting for a given time this puts a job in the kernel-global
802  * workqueue on the specified CPU.
803  */
804 int schedule_delayed_work_on(int cpu,
805                         struct delayed_work *dwork, unsigned long delay)
806 {
807         return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
808 }
809 EXPORT_SYMBOL(schedule_delayed_work_on);
810
811 /**
812  * schedule_on_each_cpu - call a function on each online CPU from keventd
813  * @func: the function to call
814  *
815  * Returns zero on success.
816  * Returns a negative errno on failure.
817  *
818  * schedule_on_each_cpu() is very slow.
819  */
820 int schedule_on_each_cpu(work_func_t func)
821 {
822         int cpu;
823         int orig = -1;
824         struct work_struct *works;
825
826         works = alloc_percpu(struct work_struct);
827         if (!works)
828                 return -ENOMEM;
829
830         get_online_cpus();
831
832         /*
833          * When running in keventd don't schedule a work item on
834          * itself.  We can just call the function directly because the
835          * work queue is already bound to this CPU.  This is also faster.
836          */
837         if (current_is_keventd())
838                 orig = raw_smp_processor_id();
839
840         for_each_online_cpu(cpu) {
841                 struct work_struct *work = per_cpu_ptr(works, cpu);
842
843                 INIT_WORK(work, func);
844                 if (cpu != orig)
845                         schedule_work_on(cpu, work);
846         }
847         if (orig >= 0)
848                 func(per_cpu_ptr(works, orig));
849
850         for_each_online_cpu(cpu)
851                 flush_work(per_cpu_ptr(works, cpu));
852
853         put_online_cpus();
854         free_percpu(works);
855         return 0;
856 }
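
/*
 * Illustrative usage sketch, not part of the original file: running a short
 * function once on every online CPU from process context, e.g. to drain
 * per-CPU caches.  All names are hypothetical.
 */
static void example_drain_local_cache(struct work_struct *unused)
{
        /* runs once on each online CPU, in that CPU's keventd thread */
}

static int example_drain_all_caches(void)
{
        return schedule_on_each_cpu(example_drain_local_cache); /* may sleep */
}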
857
858 /**
859  * flush_scheduled_work - ensure that any scheduled work has run to completion.
860  *
861  * Forces execution of the kernel-global workqueue and blocks until its
862  * completion.
863  *
864  * Think twice before calling this function!  It's very easy to get into
865  * trouble if you don't take great care.  Either of the following situations
866  * will lead to deadlock:
867  *
868  *      One of the work items currently on the workqueue needs to acquire
869  *      a lock held by your code or its caller.
870  *
871  *      Your code is running in the context of a work routine.
872  *
873  * They will be detected by lockdep when they occur, but the first might not
874  * occur very often.  It depends on what work items are on the workqueue and
875  * what locks they need, which you have no control over.
876  *
877  * In most situations flushing the entire workqueue is overkill; you merely
878  * need to know that a particular work item isn't queued and isn't running.
879  * In such cases you should use cancel_delayed_work_sync() or
880  * cancel_work_sync() instead.
881  */
882 void flush_scheduled_work(void)
883 {
884         flush_workqueue(keventd_wq);
885 }
886 EXPORT_SYMBOL(flush_scheduled_work);
887
888 /**
889  * execute_in_process_context - reliably execute the routine with user context
890  * @fn:         the function to execute
891  * @ew:         guaranteed storage for the execute work structure (must
892  *              be available when the work executes)
893  *
894  * Executes the function immediately if process context is available,
895  * otherwise schedules the function for delayed execution.
896  *
897  * Returns:     0 - function was executed
898  *              1 - function was scheduled for execution
899  */
900 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
901 {
902         if (!in_interrupt()) {
903                 fn(&ew->work);
904                 return 0;
905         }
906
907         INIT_WORK(&ew->work, fn);
908         schedule_work(&ew->work);
909
910         return 1;
911 }
912 EXPORT_SYMBOL_GPL(execute_in_process_context);
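
/*
 * Illustrative usage sketch, not part of the original file: a release path
 * that may be reached from interrupt context.  The execute_work storage
 * lives inside the object itself, so it is still valid when the deferred
 * callback finally runs.  All names are hypothetical.
 */
struct example_object {
        struct execute_work     ew;
        /* ... */
};

static void example_object_release(struct work_struct *work)
{
        struct example_object *obj =
                container_of(work, struct example_object, ew.work);

        kfree(obj);
}

static void example_object_put_last(struct example_object *obj)
{
        /* runs the release now if possible, otherwise defers it to keventd */
        execute_in_process_context(example_object_release, &obj->ew);
}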
913
914 int keventd_up(void)
915 {
916         return keventd_wq != NULL;
917 }
918
919 int current_is_keventd(void)
920 {
921         struct cpu_workqueue_struct *cwq;
922         int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
923         int ret = 0;
924
925         BUG_ON(!keventd_wq);
926
927         cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
928         if (current == cwq->thread)
929                 ret = 1;
930
931         return ret;
932
933 }
934
935 static struct cpu_workqueue_struct *
936 init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
937 {
938         struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
939
940         cwq->wq = wq;
941         spin_lock_init(&cwq->lock);
942         INIT_LIST_HEAD(&cwq->worklist);
943         init_waitqueue_head(&cwq->more_work);
944
945         return cwq;
946 }
947
948 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
949 {
950         struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
951         struct workqueue_struct *wq = cwq->wq;
952         const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
953         struct task_struct *p;
954
955         p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
956         /*
957          * Nobody can add the work_struct to this cwq,
958          *      if (caller is __create_workqueue)
959          *              nobody should see this wq
960          *      else // caller is CPU_UP_PREPARE
961          *              cpu is not on cpu_online_map
962          * so we can abort safely.
963          */
964         if (IS_ERR(p))
965                 return PTR_ERR(p);
966         if (cwq->wq->rt)
967                 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
968         cwq->thread = p;
969
970         trace_workqueue_creation(cwq->thread, cpu);
971
972         return 0;
973 }
974
975 static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
976 {
977         struct task_struct *p = cwq->thread;
978
979         if (p != NULL) {
980                 if (cpu >= 0)
981                         kthread_bind(p, cpu);
982                 wake_up_process(p);
983         }
984 }
985
986 struct workqueue_struct *__create_workqueue_key(const char *name,
987                                                 int singlethread,
988                                                 int freezeable,
989                                                 int rt,
990                                                 struct lock_class_key *key,
991                                                 const char *lock_name)
992 {
993         struct workqueue_struct *wq;
994         struct cpu_workqueue_struct *cwq;
995         int err = 0, cpu;
996
997         wq = kzalloc(sizeof(*wq), GFP_KERNEL);
998         if (!wq)
999                 return NULL;
1000
1001         wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
1002         if (!wq->cpu_wq) {
1003                 kfree(wq);
1004                 return NULL;
1005         }
1006
1007         wq->name = name;
1008         lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1009         wq->singlethread = singlethread;
1010         wq->freezeable = freezeable;
1011         wq->rt = rt;
1012         INIT_LIST_HEAD(&wq->list);
1013
1014         if (singlethread) {
1015                 cwq = init_cpu_workqueue(wq, singlethread_cpu);
1016                 err = create_workqueue_thread(cwq, singlethread_cpu);
1017                 start_workqueue_thread(cwq, -1);
1018         } else {
1019                 cpu_maps_update_begin();
1020                 /*
1021                  * We must place this wq on list even if the code below fails.
1022                  * cpu_down(cpu) can remove cpu from cpu_populated_map before
1023                  * destroy_workqueue() takes the lock; in that case we would
1024                  * leak cwq[cpu]->thread.
1025                  */
1026                 spin_lock(&workqueue_lock);
1027                 list_add(&wq->list, &workqueues);
1028                 spin_unlock(&workqueue_lock);
1029                 /*
1030                  * We must initialize cwqs for each possible cpu even if we
1031                  * are going to call destroy_workqueue() finally. Otherwise
1032                  * cpu_up() can hit the uninitialized cwq once we drop the
1033                  * lock.
1034                  */
1035                 for_each_possible_cpu(cpu) {
1036                         cwq = init_cpu_workqueue(wq, cpu);
1037                         if (err || !cpu_online(cpu))
1038                                 continue;
1039                         err = create_workqueue_thread(cwq, cpu);
1040                         start_workqueue_thread(cwq, cpu);
1041                 }
1042                 cpu_maps_update_done();
1043         }
1044
1045         if (err) {
1046                 destroy_workqueue(wq);
1047                 wq = NULL;
1048         }
1049         return wq;
1050 }
1051 EXPORT_SYMBOL_GPL(__create_workqueue_key);
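
/*
 * Illustrative usage sketch, not part of the original file: the
 * create_workqueue()/create_singlethread_workqueue() macros in
 * <linux/workqueue.h> expand into calls that end up here.  A typical
 * module creates its queue on init and destroys it on exit; all names are
 * hypothetical.
 */
static struct workqueue_struct *example_wq;

static int __init example_module_init(void)
{
        example_wq = create_singlethread_workqueue("example");
        if (!example_wq)
                return -ENOMEM;
        return 0;
}

static void __exit example_module_exit(void)
{
        /* implicitly flushes anything still queued, then stops the thread(s) */
        destroy_workqueue(example_wq);
}
module_init(example_module_init);
module_exit(example_module_exit);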
1052
1053 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
1054 {
1055         /*
1056          * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
1057          * cpu_add_remove_lock protects cwq->thread.
1058          */
1059         if (cwq->thread == NULL)
1060                 return;
1061
1062         lock_map_acquire(&cwq->wq->lockdep_map);
1063         lock_map_release(&cwq->wq->lockdep_map);
1064
1065         flush_cpu_workqueue(cwq);
1066         /*
1067          * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
1068          * a concurrent flush_workqueue() can insert a barrier after us.
1069          * However, in that case run_workqueue() won't return and check
1070          * kthread_should_stop() until it flushes all work_struct's.
1071          * When ->worklist becomes empty it is safe to exit because no
1072          * more work_structs can be queued on this cwq: flush_workqueue
1073          * checks list_empty(), and a "normal" queue_work() can't use
1074          * a dead CPU.
1075          */
1076         trace_workqueue_destruction(cwq->thread);
1077         kthread_stop(cwq->thread);
1078         cwq->thread = NULL;
1079 }
1080
1081 /**
1082  * destroy_workqueue - safely terminate a workqueue
1083  * @wq: target workqueue
1084  *
1085  * Safely destroy a workqueue. All work currently pending will be done first.
1086  */
1087 void destroy_workqueue(struct workqueue_struct *wq)
1088 {
1089         const struct cpumask *cpu_map = wq_cpu_map(wq);
1090         int cpu;
1091
1092         cpu_maps_update_begin();
1093         spin_lock(&workqueue_lock);
1094         list_del(&wq->list);
1095         spin_unlock(&workqueue_lock);
1096
1097         for_each_cpu(cpu, cpu_map)
1098                 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
1099         cpu_maps_update_done();
1100
1101         free_percpu(wq->cpu_wq);
1102         kfree(wq);
1103 }
1104 EXPORT_SYMBOL_GPL(destroy_workqueue);
1105
1106 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
1107                                                 unsigned long action,
1108                                                 void *hcpu)
1109 {
1110         unsigned int cpu = (unsigned long)hcpu;
1111         struct cpu_workqueue_struct *cwq;
1112         struct workqueue_struct *wq;
1113         int ret = NOTIFY_OK;
1114
1115         action &= ~CPU_TASKS_FROZEN;
1116
1117         switch (action) {
1118         case CPU_UP_PREPARE:
1119                 cpumask_set_cpu(cpu, cpu_populated_map);
1120         }
1121 undo:
1122         list_for_each_entry(wq, &workqueues, list) {
1123                 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
1124
1125                 switch (action) {
1126                 case CPU_UP_PREPARE:
1127                         if (!create_workqueue_thread(cwq, cpu))
1128                                 break;
1129                         printk(KERN_ERR "workqueue [%s] for %i failed\n",
1130                                 wq->name, cpu);
1131                         action = CPU_UP_CANCELED;
1132                         ret = NOTIFY_BAD;
1133                         goto undo;
1134
1135                 case CPU_ONLINE:
1136                         start_workqueue_thread(cwq, cpu);
1137                         break;
1138
1139                 case CPU_UP_CANCELED:
1140                         start_workqueue_thread(cwq, -1);
1141                 case CPU_POST_DEAD:
1142                         cleanup_workqueue_thread(cwq);
1143                         break;
1144                 }
1145         }
1146
1147         switch (action) {
1148         case CPU_UP_CANCELED:
1149         case CPU_POST_DEAD:
1150                 cpumask_clear_cpu(cpu, cpu_populated_map);
1151         }
1152
1153         return ret;
1154 }
1155
1156 #ifdef CONFIG_SMP
1157
1158 struct work_for_cpu {
1159         struct completion completion;
1160         long (*fn)(void *);
1161         void *arg;
1162         long ret;
1163 };
1164
1165 static int do_work_for_cpu(void *_wfc)
1166 {
1167         struct work_for_cpu *wfc = _wfc;
1168         wfc->ret = wfc->fn(wfc->arg);
1169         complete(&wfc->completion);
1170         return 0;
1171 }
1172
1173 /**
1174  * work_on_cpu - run a function in process context on a particular cpu
1175  * @cpu: the cpu to run on
1176  * @fn: the function to run
1177  * @arg: the function arg
1178  *
1179  * This will return the value @fn returns.
1180  * It is up to the caller to ensure that the cpu doesn't go offline.
1181  * The caller must not hold any locks which would prevent @fn from completing.
1182  */
1183 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
1184 {
1185         struct task_struct *sub_thread;
1186         struct work_for_cpu wfc = {
1187                 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
1188                 .fn = fn,
1189                 .arg = arg,
1190         };
1191
1192         sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
1193         if (IS_ERR(sub_thread))
1194                 return PTR_ERR(sub_thread);
1195         kthread_bind(sub_thread, cpu);
1196         wake_up_process(sub_thread);
1197         wait_for_completion(&wfc.completion);
1198         return wfc.ret;
1199 }
1200 EXPORT_SYMBOL_GPL(work_on_cpu);
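
/*
 * Illustrative usage sketch, not part of the original file: running a
 * function on one particular CPU and collecting its return value, e.g. to
 * read a per-CPU register.  The caller pins CPU hotplug for the duration,
 * as required above.  All names are hypothetical.
 */
static long example_read_local_state(void *arg)
{
        /* executes on the target CPU, in process context, may sleep */
        return 0;
}

static long example_query_cpu(unsigned int cpu)
{
        long ret = -ENODEV;

        get_online_cpus();
        if (cpu_online(cpu))
                ret = work_on_cpu(cpu, example_read_local_state, NULL);
        put_online_cpus();
        return ret;
}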
1201 #endif /* CONFIG_SMP */
1202
1203 void __init init_workqueues(void)
1204 {
1205         alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
1206
1207         cpumask_copy(cpu_populated_map, cpu_online_mask);
1208         singlethread_cpu = cpumask_first(cpu_possible_mask);
1209         cpu_singlethread_map = cpumask_of(singlethread_cpu);
1210         hotcpu_notifier(workqueue_cpu_callback, 0);
1211         keventd_wq = create_workqueue("events");
1212         BUG_ON(!keventd_wq);
1213 }