WorkStruct: Separate delayable and non-delayable events.
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        long remove_sequence;   /* Least-recently added (next to run) */
        long insert_sequence;   /* Next to add */

        struct list_head worklist;
        wait_queue_head_t more_work;
        wait_queue_head_t work_done;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        work->wq_data = cwq;
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(0, &work->pending)) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
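
/*
 * Illustrative sketch (not part of this file): a typical caller embeds a
 * work_struct in its own state and queues it from, say, an interrupt
 * handler.  The names my_dev, my_work_fn and my_wq are hypothetical;
 * INIT_WORK() takes the handler and its data argument, as defined in
 * <linux/workqueue.h> in this tree.
 */
#if 0
struct my_dev {
        struct work_struct work;
        /* ... device state ... */
};

static void my_work_fn(void *data)
{
        struct my_dev *dev = data;
        /* runs in process context on one of my_wq's worker threads */
}

static void my_dev_init(struct my_dev *dev, struct workqueue_struct *my_wq)
{
        INIT_WORK(&dev->work, my_work_fn, dev);
        queue_work(my_wq, &dev->work);  /* returns 0 if already pending */
}
#endif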

static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct workqueue_struct *wq = dwork->work.wq_data;
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
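
/*
 * Illustrative sketch: deferring work by about one second.  The names are
 * hypothetical; INIT_DELAYED_WORK() is assumed from <linux/workqueue.h> in
 * this tree (it initialises both the embedded work and its timer).
 */
#if 0
static void my_poll_fn(void *data)
{
        /* deferred handler, runs in process context */
}

static struct delayed_work my_dwork;

static void my_defer(struct workqueue_struct *my_wq)
{
        INIT_DELAYED_WORK(&my_dwork, my_poll_fn, NULL);
        queue_delayed_work(my_wq, &my_dwork, HZ);       /* after ~1 second */
}
#endif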

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(0, &work->pending)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                work->wq_data = wq;
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
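
/*
 * Continuing the hypothetical sketch above, but starting the delay timer on
 * a specific CPU, so the work is then queued (and normally run) there.  The
 * CPU number is only an example.
 */
#if 0
static void my_defer_on_cpu0(struct workqueue_struct *my_wq)
{
        queue_delayed_work_on(0, my_wq, &my_dwork, HZ); /* CPU 0, ~1s delay */
}
#endif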

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                void (*f) (void *) = work->func;
                void *data = work->data;

                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(work->wq_data != cwq);
                clear_bit(0, &work->pending);
                f(data);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
                wake_up(&cwq->work_done);
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node-local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                DEFINE_WAIT(wait);
                long sequence_needed;

                spin_lock_irq(&cwq->lock);
                sequence_needed = cwq->insert_sequence;

                while (sequence_needed - cwq->remove_sequence > 0) {
                        prepare_to_wait(&cwq->work_done, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&cwq->lock);
                        schedule();
                        spin_lock_irq(&cwq->lock);
                }
                finish_wait(&cwq->work_done, &wait);
                spin_unlock_irq(&cwq->lock);
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each cpu workqueue's current insert_sequence
 * number and will sleep until the head sequence is greater than or equal to
 * that.  This means that we sleep until all works which were queued on entry
 * have been handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        might_sleep();

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                mutex_lock(&workqueue_mutex);
                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
                mutex_unlock(&workqueue_mutex);
        }
}
EXPORT_SYMBOL_GPL(flush_workqueue);
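
/*
 * Illustrative sketch: a teardown path typically stops whatever submits new
 * work, then flushes before freeing the state the handlers touch.  This
 * reuses the hypothetical my_dev/my_wq names from the sketch above.
 */
#if 0
static void my_dev_shutdown(struct my_dev *dev, struct workqueue_struct *my_wq)
{
        /* new submissions are already stopped; wait for queued work */
        flush_workqueue(my_wq);
        kfree(dev);     /* now safe: no handler can still reference dev */
}
#endif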

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        mutex_unlock(&workqueue_mutex);

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                list_del(&wq->list);
        }
        mutex_unlock(&workqueue_mutex);
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
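
/*
 * Illustrative sketch of the whole lifecycle.  create_workqueue() and
 * create_singlethread_workqueue() are the usual wrappers around
 * __create_workqueue() (multi-threaded and single-threaded, respectively);
 * the queue name and work item below are hypothetical, and DECLARE_WORK()
 * is assumed from <linux/workqueue.h> in this tree.
 */
#if 0
static void my_init_fn(void *data)
{
        /* ... */
}

static DECLARE_WORK(my_init_work, my_init_fn, NULL);
static struct workqueue_struct *my_wq;

static int my_module_init(void)
{
        my_wq = create_singlethread_workqueue("mywq");
        if (!my_wq)
                return -ENOMEM;
        queue_work(my_wq, &my_init_work);
        return 0;
}

static void my_module_exit(void)
{
        destroy_workqueue(my_wq);       /* flushes remaining work first */
}
#endif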

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
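
/*
 * Illustrative sketch: most callers never create their own queue and just
 * use these keventd wrappers.  The work items are hypothetical, and
 * DECLARE_WORK()/DECLARE_DELAYED_WORK() are assumed from
 * <linux/workqueue.h> in this tree.
 */
#if 0
static void my_event_fn(void *data)
{
        /* ... */
}

static DECLARE_WORK(my_immediate_work, my_event_fn, NULL);
static DECLARE_DELAYED_WORK(my_later_work, my_event_fn, NULL);

static void my_kick(void)
{
        schedule_work(&my_immediate_work);              /* run soon */
        schedule_delayed_work(&my_later_work, 5 * HZ);  /* in ~5 seconds */
}
#endif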

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        mutex_lock(&workqueue_mutex);
        for_each_online_cpu(cpu) {
                INIT_WORK(per_cpu_ptr(works, cpu), func, info);
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                per_cpu_ptr(works, cpu));
        }
        mutex_unlock(&workqueue_mutex);
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
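
/*
 * Illustrative sketch: running a function once on every online CPU, e.g.
 * to drain per-cpu caches.  my_drain_fn is hypothetical; it runs in keventd
 * context on each CPU in turn, and the call returns once all have finished.
 */
#if 0
static void my_drain_fn(void *info)
{
        /* touches only this CPU's per-cpu data */
}

static int my_drain_all_cpus(void)
{
        return schedule_on_each_cpu(my_drain_fn, NULL); /* 0 or -ENOMEM */
}
#endif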

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *                      work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
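
/*
 * Illustrative sketch of the pattern this is for: a handler that re-queues
 * itself.  A single cancel_delayed_work() can lose the race with the handler
 * rearming the timer, hence the cancel-then-flush loop above.  The names
 * here are hypothetical.
 */
#if 0
static struct workqueue_struct *my_wq;
static struct delayed_work my_poll_work;

static void my_poll(void *data)
{
        /* ... periodic work ... */
        queue_delayed_work(my_wq, &my_poll_work, HZ);   /* rearm */
}

static void my_stop_polling(void)
{
        cancel_rearming_delayed_workqueue(my_wq, &my_poll_work);
}
#endif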

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *                      work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @data:       data to pass to the function
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
                               struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(data);
                return 0;
        }

        INIT_WORK(&ew->work, fn, data);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
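
/*
 * Illustrative sketch: a release path that may be entered from interrupt
 * context.  The execute_work storage must stay valid until the handler
 * runs, so it is embedded in the object being released.  The names are
 * hypothetical.
 */
#if 0
struct my_obj {
        struct execute_work ew;
        /* ... */
};

static void my_obj_release(void *data)
{
        kfree(data);
}

static void my_obj_put(struct my_obj *obj)
{
        /* runs my_obj_release() now if possible, else via keventd */
        execute_in_process_context(my_obj_release, obj, &obj->ew);
}
#endif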

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DOWN_PREPARE:
                mutex_lock(&workqueue_mutex);
                break;

        case CPU_DOWN_FAILED:
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                mutex_unlock(&workqueue_mutex);
                break;
        }

        return NOTIFY_OK;
}
#endif

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}