/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"

enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give them nice level -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough; such
 *    fields should only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};

/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned on a (1 << WORK_STRUCT_FLAG_BITS) byte boundary.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)			free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif

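/*
 * The UP version of for_each_mayday_cpu() leans on the comma operator:
 * "(cpu) = 0, (mask)" assigns 0 to @cpu and then evaluates the mask,
 * so the loop body runs at most once, for cpu 0, iff the single mask
 * bit is set.
 */
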
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			saved_max_active; /* W: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)

static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}

/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to the
 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))

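/*
 * For example, on a two-CPU system for_each_gcwq_cpu() visits 0, 1 and
 * then WORK_CPU_UNBOUND, while for_each_cwq_cpu() visits 0 and 1 for a
 * bound workqueue but only WORK_CPU_UNBOUND for a WQ_UNBOUND one; the
 * @sw argument of __next_gcwq_cpu() selects which of the two sets is
 * walked.
 */
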
#ifdef CONFIG_LOCKDEP
/**
 * in_workqueue_context() - in context of specified workqueue?
 * @wq: the workqueue of interest
 *
 * Checks lockdep state to see if the current task is executing from
 * within a workqueue item.  This function exists only if lockdep is
 * enabled.
 */
int in_workqueue_context(struct workqueue_struct *wq)
{
	return lock_is_held(&wq->lockdep_map);
}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_gcwq_nr_running(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(gcwq_nr_running, cpu);
	else
		return &unbound_gcwq_nr_running;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids)) {
#ifdef CONFIG_SMP
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
#else
			return wq->cpu_wq.single;
#endif
		}
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}

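/*
 * Work colors drive workqueue flushing.  Each queued work carries a
 * color in the WORK_STRUCT_COLOR_SHIFT bits of work->data and
 * cwq->nr_in_flight[] counts in-flight works per color, which lets
 * flush_workqueue() wait for exactly the works queued before the
 * color was advanced.  work_next_color() simply cycles through the
 * WORK_NR_COLORS values.
 */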
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}

/*
 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 * cleared and the work data contains the cpu number it was last on.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}

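/*
 * Concretely, while a work is queued its data word is
 *
 *	(unsigned long)cwq | color/flag bits | WORK_STRUCT_CWQ
 *
 * (cwqs are aligned so that the low WORK_STRUCT_FLAG_BITS of the
 * pointer are free) and, once execution starts, set_work_cpu()
 * replaces the pointer part with cpu << WORK_STRUCT_FLAG_BITS, which
 * is exactly what get_work_gcwq() above decodes.
 */
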
/*
 * Policy functions.  These define the policies on how the global
 * worker pool is managed.  Unless noted otherwise, these functions
 * assume that they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
		gcwq->flags & GCWQ_HIGHPRI_PENDING;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 */
static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct global_cwq *gcwq)
{
	return gcwq->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct global_cwq *gcwq)
{
	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct global_cwq *gcwq)
{
	return need_more_worker(gcwq) && !may_start_working(gcwq);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct global_cwq *gcwq)
{
	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

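/*
 * Worked example: with MAX_IDLE_WORKERS_RATIO == 4 and 16 busy
 * workers, five idle workers are tolerated ((5 - 2) * 4 = 12 < 16)
 * but a sixth makes too_many_workers() true ((6 - 2) * 4 = 16 >= 16),
 * which arms the idle timer to reap the surplus after
 * IDLE_WORKER_TIMEOUT.
 */
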
/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}

/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
		atomic_inc(get_gcwq_nr_running(cpu));
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  A worker on the same cpu can be woken up by
 * returning a pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct global_cwq *gcwq = get_gcwq(cpu);
	atomic_t *nr_running = get_gcwq_nr_running(cpu);

	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that no one else
	 * could be manipulating idle_list, so dereferencing idle_list
	 * without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
		to_wakeup = first_worker(gcwq);
	return to_wakeup ? to_wakeup->task : NULL;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct global_cwq *gcwq = worker->gcwq;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/* if transitioning out of NOT_RUNNING, increment nr_running */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
}

/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}

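/*
 * The hash above discards the low bits of the pointer (which vary the
 * least across distinct work_structs), folds the next
 * BUSY_WORKER_HASH_ORDER bits on top, and masks down to one of the
 * BUSY_WORKER_HASH_SIZE buckets.
 */
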
/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}

/**
 * gcwq_determine_ins_pos - find insertion position
 * @gcwq: gcwq of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @gcwq, determine insertion
 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
					struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}

/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either wq_worker_sleeping() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned long flags;

	debug_work_activate(work);

	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
		return;

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;
	work_flags = work_color_to_flags(cwq->work_color);

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = gcwq_determine_ins_pos(gcwq, cwq);
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &cwq->delayed_works;
	}

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock_irqrestore(&gcwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

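/*
 * Typical usage from elsewhere in the kernel (illustrative sketch, not
 * part of this file; my_work_fn and my_work are made-up names):
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *	...
 *	queue_work(system_wq, &my_work);
 *
 * my_work_fn() then runs once, in process context, on the CPU that
 * queued it (barring CPU hotplug).
 */
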
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

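/*
 * Illustrative sketch (not part of this file; my_dwork_fn and my_dwork
 * are made-up names): run a handler roughly 100ms from now.
 *
 *	static void my_dwork_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *	...
 *	queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
 */
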
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (likely(!(worker->flags & WORKER_ROGUE))) {
		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
			mod_timer(&gcwq->idle_timer,
				  jiffies + IDLE_WORKER_TIMEOUT);
	} else
		wake_up_all(&gcwq->trustee_wait);

	/* sanity check nr_running */
	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

/**
 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by rogue workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online.  kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and gcwq may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed() and locks gcwq and verifies
 * the binding against GCWQ_DISASSOCIATED which is set during
 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
 * idle state or fetches works without dropping lock, it can guarantee
 * the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep.  Called without any lock but returns with gcwq->lock
 * held.
 *
 * RETURNS:
 * %true if the associated gcwq is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation.  Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;
		spin_unlock_irq(&gcwq->lock);

		/* CPU has come up in between, retry migration */
		cpu_relax();
	}
}

/*
 * Function for worker->rebind_work used to rebind rogue busy workers
 * to the associated cpu which is coming back online.  This is
 * scheduled by cpu up but can race with other cpu hotplug operations
 * and may be executed twice without intervening cpu down.
 */
static void worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->gcwq;

	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_REBIND);

	spin_unlock_irq(&gcwq->lock);
}

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @gcwq: gcwq the new worker will belong to
Tejun Heoc34056a2010-06-29 10:07:11 +02001297 * @bind: whether to bind the worker to @gcwq's cpu or not
1298 *
Tejun Heo7e116292010-06-29 10:07:13 +02001299 * Create a new worker which is bound to @gcwq. The returned worker
Tejun Heoc34056a2010-06-29 10:07:11 +02001300 * can be started by calling start_worker() or destroyed using
1301 * destroy_worker().
1302 *
1303 * CONTEXT:
1304 * Might sleep. Does GFP_KERNEL allocations.
1305 *
1306 * RETURNS:
1307 * Pointer to the newly created worker.
1308 */
Tejun Heo7e116292010-06-29 10:07:13 +02001309static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
Tejun Heoc34056a2010-06-29 10:07:11 +02001310{
Tejun Heof3421792010-07-02 10:03:51 +02001311 bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
Tejun Heoc34056a2010-06-29 10:07:11 +02001312 struct worker *worker = NULL;
Tejun Heof3421792010-07-02 10:03:51 +02001313 int id = -1;
Tejun Heoc34056a2010-06-29 10:07:11 +02001314
Tejun Heo8b03ae32010-06-29 10:07:12 +02001315 spin_lock_irq(&gcwq->lock);
1316 while (ida_get_new(&gcwq->worker_ida, &id)) {
1317 spin_unlock_irq(&gcwq->lock);
1318 if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
Tejun Heoc34056a2010-06-29 10:07:11 +02001319 goto fail;
Tejun Heo8b03ae32010-06-29 10:07:12 +02001320 spin_lock_irq(&gcwq->lock);
Tejun Heoc34056a2010-06-29 10:07:11 +02001321 }
Tejun Heo8b03ae32010-06-29 10:07:12 +02001322 spin_unlock_irq(&gcwq->lock);
Tejun Heoc34056a2010-06-29 10:07:11 +02001323
1324 worker = alloc_worker();
1325 if (!worker)
1326 goto fail;
1327
Tejun Heo8b03ae32010-06-29 10:07:12 +02001328 worker->gcwq = gcwq;
Tejun Heoc34056a2010-06-29 10:07:11 +02001329 worker->id = id;
1330
Tejun Heof3421792010-07-02 10:03:51 +02001331 if (!on_unbound_cpu)
1332 worker->task = kthread_create(worker_thread, worker,
1333 "kworker/%u:%d", gcwq->cpu, id);
1334 else
1335 worker->task = kthread_create(worker_thread, worker,
1336 "kworker/u:%d", id);
Tejun Heoc34056a2010-06-29 10:07:11 +02001337 if (IS_ERR(worker->task))
1338 goto fail;
1339
Tejun Heodb7bccf2010-06-29 10:07:12 +02001340 /*
 1341 * A rogue worker will become a regular one if the CPU comes
1342 * online later on. Make sure every worker has
1343 * PF_THREAD_BOUND set.
1344 */
Tejun Heof3421792010-07-02 10:03:51 +02001345 if (bind && !on_unbound_cpu)
Tejun Heo8b03ae32010-06-29 10:07:12 +02001346 kthread_bind(worker->task, gcwq->cpu);
Tejun Heof3421792010-07-02 10:03:51 +02001347 else {
Tejun Heodb7bccf2010-06-29 10:07:12 +02001348 worker->task->flags |= PF_THREAD_BOUND;
Tejun Heof3421792010-07-02 10:03:51 +02001349 if (on_unbound_cpu)
1350 worker->flags |= WORKER_UNBOUND;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 }
Oleg Nesterov3af244332007-05-09 02:34:09 -07001352
Tejun Heoc34056a2010-06-29 10:07:11 +02001353 return worker;
1354fail:
1355 if (id >= 0) {
Tejun Heo8b03ae32010-06-29 10:07:12 +02001356 spin_lock_irq(&gcwq->lock);
1357 ida_remove(&gcwq->worker_ida, id);
1358 spin_unlock_irq(&gcwq->lock);
Tejun Heoc34056a2010-06-29 10:07:11 +02001359 }
1360 kfree(worker);
1361 return NULL;
1362}
1363
1364/**
1365 * start_worker - start a newly created worker
1366 * @worker: worker to start
1367 *
Tejun Heoc8e55f32010-06-29 10:07:12 +02001368 * Make the gcwq aware of @worker and start it.
Tejun Heoc34056a2010-06-29 10:07:11 +02001369 *
1370 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001371 * spin_lock_irq(gcwq->lock).
Tejun Heoc34056a2010-06-29 10:07:11 +02001372 */
1373static void start_worker(struct worker *worker)
1374{
Tejun Heocb444762010-07-02 10:03:50 +02001375 worker->flags |= WORKER_STARTED;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001376 worker->gcwq->nr_workers++;
1377 worker_enter_idle(worker);
Tejun Heoc34056a2010-06-29 10:07:11 +02001378 wake_up_process(worker->task);
1379}
1380
1381/**
1382 * destroy_worker - destroy a workqueue worker
1383 * @worker: worker to be destroyed
1384 *
Tejun Heoc8e55f32010-06-29 10:07:12 +02001385 * Destroy @worker and adjust @gcwq stats accordingly.
1386 *
1387 * CONTEXT:
1388 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
Tejun Heoc34056a2010-06-29 10:07:11 +02001389 */
1390static void destroy_worker(struct worker *worker)
1391{
Tejun Heo8b03ae32010-06-29 10:07:12 +02001392 struct global_cwq *gcwq = worker->gcwq;
Tejun Heoc34056a2010-06-29 10:07:11 +02001393 int id = worker->id;
1394
1395 /* sanity check frenzy */
1396 BUG_ON(worker->current_work);
Tejun Heoaffee4b2010-06-29 10:07:12 +02001397 BUG_ON(!list_empty(&worker->scheduled));
Tejun Heoc34056a2010-06-29 10:07:11 +02001398
Tejun Heoc8e55f32010-06-29 10:07:12 +02001399 if (worker->flags & WORKER_STARTED)
1400 gcwq->nr_workers--;
1401 if (worker->flags & WORKER_IDLE)
1402 gcwq->nr_idle--;
1403
1404 list_del_init(&worker->entry);
Tejun Heocb444762010-07-02 10:03:50 +02001405 worker->flags |= WORKER_DIE;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001406
1407 spin_unlock_irq(&gcwq->lock);
1408
Tejun Heoc34056a2010-06-29 10:07:11 +02001409 kthread_stop(worker->task);
1410 kfree(worker);
1411
Tejun Heo8b03ae32010-06-29 10:07:12 +02001412 spin_lock_irq(&gcwq->lock);
1413 ida_remove(&gcwq->worker_ida, id);
Tejun Heoc34056a2010-06-29 10:07:11 +02001414}
1415
Tejun Heoe22bee72010-06-29 10:07:14 +02001416static void idle_worker_timeout(unsigned long __gcwq)
1417{
1418 struct global_cwq *gcwq = (void *)__gcwq;
1419
1420 spin_lock_irq(&gcwq->lock);
1421
1422 if (too_many_workers(gcwq)) {
1423 struct worker *worker;
1424 unsigned long expires;
1425
1426 /* idle_list is kept in LIFO order, check the last one */
1427 worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1428 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1429
1430 if (time_before(jiffies, expires))
1431 mod_timer(&gcwq->idle_timer, expires);
1432 else {
1433 /* it's been idle for too long, wake up manager */
1434 gcwq->flags |= GCWQ_MANAGE_WORKERS;
1435 wake_up_worker(gcwq);
1436 }
1437 }
1438
1439 spin_unlock_irq(&gcwq->lock);
1440}
1441
1442static bool send_mayday(struct work_struct *work)
1443{
1444 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1445 struct workqueue_struct *wq = cwq->wq;
Tejun Heof3421792010-07-02 10:03:51 +02001446 unsigned int cpu;
Tejun Heoe22bee72010-06-29 10:07:14 +02001447
1448 if (!(wq->flags & WQ_RESCUER))
1449 return false;
1450
1451 /* mayday mayday mayday */
Tejun Heof3421792010-07-02 10:03:51 +02001452 cpu = cwq->gcwq->cpu;
1453 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1454 if (cpu == WORK_CPU_UNBOUND)
1455 cpu = 0;
Tejun Heof2e005a2010-07-20 15:59:09 +02001456 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
Tejun Heoe22bee72010-06-29 10:07:14 +02001457 wake_up_process(wq->rescuer->task);
1458 return true;
1459}
1460
1461static void gcwq_mayday_timeout(unsigned long __gcwq)
1462{
1463 struct global_cwq *gcwq = (void *)__gcwq;
1464 struct work_struct *work;
1465
1466 spin_lock_irq(&gcwq->lock);
1467
1468 if (need_to_create_worker(gcwq)) {
1469 /*
1470 * We've been trying to create a new worker but
1471 * haven't been successful. We might be hitting an
1472 * allocation deadlock. Send distress signals to
1473 * rescuers.
1474 */
1475 list_for_each_entry(work, &gcwq->worklist, entry)
1476 send_mayday(work);
1477 }
1478
1479 spin_unlock_irq(&gcwq->lock);
1480
1481 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1482}
1483
1484/**
1485 * maybe_create_worker - create a new worker if necessary
1486 * @gcwq: gcwq to create a new worker for
1487 *
1488 * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to
1489 * have at least one idle worker on return from this function. If
1490 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1491 * sent to all rescuers with works scheduled on @gcwq to resolve
1492 * possible allocation deadlock.
1493 *
1494 * On return, need_to_create_worker() is guaranteed to be false and
1495 * may_start_working() true.
1496 *
1497 * LOCKING:
1498 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1499 * multiple times. Does GFP_KERNEL allocations. Called only from
1500 * manager.
1501 *
1502 * RETURNS:
1503 * false if no action was taken and gcwq->lock stayed locked, true
1504 * otherwise.
1505 */
1506static bool maybe_create_worker(struct global_cwq *gcwq)
Namhyung Kim06bd6eb2010-08-22 23:19:42 +09001507__releases(&gcwq->lock)
1508__acquires(&gcwq->lock)
Tejun Heoe22bee72010-06-29 10:07:14 +02001509{
1510 if (!need_to_create_worker(gcwq))
1511 return false;
1512restart:
Tejun Heo9f9c2362010-07-14 11:31:20 +02001513 spin_unlock_irq(&gcwq->lock);
1514
Tejun Heoe22bee72010-06-29 10:07:14 +02001515 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1516 mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1517
1518 while (true) {
1519 struct worker *worker;
1520
Tejun Heoe22bee72010-06-29 10:07:14 +02001521 worker = create_worker(gcwq, true);
1522 if (worker) {
1523 del_timer_sync(&gcwq->mayday_timer);
1524 spin_lock_irq(&gcwq->lock);
1525 start_worker(worker);
1526 BUG_ON(need_to_create_worker(gcwq));
1527 return true;
1528 }
1529
1530 if (!need_to_create_worker(gcwq))
1531 break;
1532
Tejun Heoe22bee72010-06-29 10:07:14 +02001533 __set_current_state(TASK_INTERRUPTIBLE);
1534 schedule_timeout(CREATE_COOLDOWN);
Tejun Heo9f9c2362010-07-14 11:31:20 +02001535
Tejun Heoe22bee72010-06-29 10:07:14 +02001536 if (!need_to_create_worker(gcwq))
1537 break;
1538 }
1539
Tejun Heoe22bee72010-06-29 10:07:14 +02001540 del_timer_sync(&gcwq->mayday_timer);
1541 spin_lock_irq(&gcwq->lock);
1542 if (need_to_create_worker(gcwq))
1543 goto restart;
1544 return true;
1545}
1546
1547/**
1548 * maybe_destroy_worker - destroy workers which have been idle for a while
1549 * @gcwq: gcwq to destroy workers for
1550 *
1551 * Destroy @gcwq workers which have been idle for longer than
1552 * IDLE_WORKER_TIMEOUT.
1553 *
1554 * LOCKING:
1555 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1556 * multiple times. Called only from manager.
1557 *
1558 * RETURNS:
1559 * false if no action was taken and gcwq->lock stayed locked, true
1560 * otherwise.
1561 */
1562static bool maybe_destroy_workers(struct global_cwq *gcwq)
1563{
1564 bool ret = false;
1565
1566 while (too_many_workers(gcwq)) {
1567 struct worker *worker;
1568 unsigned long expires;
1569
1570 worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1571 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1572
1573 if (time_before(jiffies, expires)) {
1574 mod_timer(&gcwq->idle_timer, expires);
1575 break;
1576 }
1577
1578 destroy_worker(worker);
1579 ret = true;
1580 }
1581
1582 return ret;
1583}
1584
1585/**
1586 * manage_workers - manage worker pool
1587 * @worker: self
1588 *
1589 * Assume the manager role and manage gcwq worker pool @worker belongs
1590 * to. At any given time, there can be only zero or one manager per
1591 * gcwq. The exclusion is handled automatically by this function.
1592 *
1593 * The caller can safely start processing works on false return. On
1594 * true return, it's guaranteed that need_to_create_worker() is false
1595 * and may_start_working() is true.
1596 *
1597 * CONTEXT:
1598 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1599 * multiple times. Does GFP_KERNEL allocations.
1600 *
1601 * RETURNS:
1602 * false if no action was taken and gcwq->lock stayed locked, true if
1603 * some action was taken.
1604 */
1605static bool manage_workers(struct worker *worker)
1606{
1607 struct global_cwq *gcwq = worker->gcwq;
1608 bool ret = false;
1609
1610 if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1611 return ret;
1612
1613 gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1614 gcwq->flags |= GCWQ_MANAGING_WORKERS;
1615
1616 /*
1617 * Destroy and then create so that may_start_working() is true
1618 * on return.
1619 */
1620 ret |= maybe_destroy_workers(gcwq);
1621 ret |= maybe_create_worker(gcwq);
1622
1623 gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1624
1625 /*
1626 * The trustee might be waiting to take over the manager
1627 * position, tell it we're done.
1628 */
1629 if (unlikely(gcwq->trustee))
1630 wake_up_all(&gcwq->trustee_wait);
1631
1632 return ret;
1633}
1634
Tejun Heoa62428c2010-06-29 10:07:10 +02001635/**
Tejun Heoaffee4b2010-06-29 10:07:12 +02001636 * move_linked_works - move linked works to a list
1637 * @work: start of series of works to be scheduled
1638 * @head: target list to append @work to
 1639 * @nextp: out parameter for nested worklist walking
1640 *
1641 * Schedule linked works starting from @work to @head. Work series to
1642 * be scheduled starts at @work and includes any consecutive work with
1643 * WORK_STRUCT_LINKED set in its predecessor.
1644 *
1645 * If @nextp is not NULL, it's updated to point to the next work of
1646 * the last scheduled work. This allows move_linked_works() to be
1647 * nested inside outer list_for_each_entry_safe().
1648 *
1649 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001650 * spin_lock_irq(gcwq->lock).
Tejun Heoaffee4b2010-06-29 10:07:12 +02001651 */
1652static void move_linked_works(struct work_struct *work, struct list_head *head,
1653 struct work_struct **nextp)
1654{
1655 struct work_struct *n;
1656
1657 /*
 1658 * A linked worklist always ends before the end of the list;
 1659 * use NULL as the list head.
1660 */
1661 list_for_each_entry_safe_from(work, n, NULL, entry) {
1662 list_move_tail(&work->entry, head);
1663 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1664 break;
1665 }
1666
1667 /*
1668 * If we're already inside safe list traversal and have moved
1669 * multiple works to the scheduled queue, the next position
1670 * needs to be updated.
1671 */
1672 if (nextp)
1673 *nextp = n;
1674}
1675
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001676static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1677{
1678 struct work_struct *work = list_first_entry(&cwq->delayed_works,
1679 struct work_struct, entry);
Tejun Heo649027d2010-06-29 10:07:14 +02001680 struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001681
Tejun Heo649027d2010-06-29 10:07:14 +02001682 move_linked_works(work, pos, NULL);
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001683 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001684 cwq->nr_active++;
1685}
1686
Tejun Heoaffee4b2010-06-29 10:07:12 +02001687/**
Tejun Heo73f53c42010-06-29 10:07:11 +02001688 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1689 * @cwq: cwq of interest
1690 * @color: color of work which left the queue
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001691 * @delayed: for a delayed work
Tejun Heo73f53c42010-06-29 10:07:11 +02001692 *
1693 * A work either has completed or is removed from pending queue,
1694 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1695 *
1696 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001697 * spin_lock_irq(gcwq->lock).
Tejun Heo73f53c42010-06-29 10:07:11 +02001698 */
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001699static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1700 bool delayed)
Tejun Heo73f53c42010-06-29 10:07:11 +02001701{
1702 /* ignore uncolored works */
1703 if (color == WORK_NO_COLOR)
1704 return;
1705
1706 cwq->nr_in_flight[color]--;
Tejun Heo1e19ffc2010-06-29 10:07:12 +02001707
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001708 if (!delayed) {
1709 cwq->nr_active--;
1710 if (!list_empty(&cwq->delayed_works)) {
1711 /* one down, submit a delayed one */
1712 if (cwq->nr_active < cwq->max_active)
1713 cwq_activate_first_delayed(cwq);
1714 }
Tejun Heo502ca9d2010-06-29 10:07:13 +02001715 }
Tejun Heo73f53c42010-06-29 10:07:11 +02001716
1717 /* is flush in progress and are we at the flushing tip? */
1718 if (likely(cwq->flush_color != color))
1719 return;
1720
1721 /* are there still in-flight works? */
1722 if (cwq->nr_in_flight[color])
1723 return;
1724
1725 /* this cwq is done, clear flush_color */
1726 cwq->flush_color = -1;
1727
1728 /*
1729 * If this was the last cwq, wake up the first flusher. It
1730 * will handle the rest.
1731 */
1732 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1733 complete(&cwq->wq->first_flusher->done);
1734}
1735
1736/**
Tejun Heoa62428c2010-06-29 10:07:10 +02001737 * process_one_work - process single work
Tejun Heoc34056a2010-06-29 10:07:11 +02001738 * @worker: self
Tejun Heoa62428c2010-06-29 10:07:10 +02001739 * @work: work to process
1740 *
 1741 * Process @work.  This function contains all the logic necessary to
 1742 * process a single work including synchronization against and
 1743 * interaction with other workers on the same cpu, queueing and
 1744 * flushing.  As long as the context requirement is met, any worker can
 1745 * call this function to process a work.
1746 *
1747 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001748 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
Tejun Heoa62428c2010-06-29 10:07:10 +02001749 */
Tejun Heoc34056a2010-06-29 10:07:11 +02001750static void process_one_work(struct worker *worker, struct work_struct *work)
Namhyung Kim06bd6eb2010-08-22 23:19:42 +09001751__releases(&gcwq->lock)
1752__acquires(&gcwq->lock)
Tejun Heoa62428c2010-06-29 10:07:10 +02001753{
Tejun Heo7e116292010-06-29 10:07:13 +02001754 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
Tejun Heo8b03ae32010-06-29 10:07:12 +02001755 struct global_cwq *gcwq = cwq->gcwq;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001756 struct hlist_head *bwh = busy_worker_head(gcwq, work);
Tejun Heofb0e7be2010-06-29 10:07:15 +02001757 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
Tejun Heoa62428c2010-06-29 10:07:10 +02001758 work_func_t f = work->func;
Tejun Heo73f53c42010-06-29 10:07:11 +02001759 int work_color;
Tejun Heo7e116292010-06-29 10:07:13 +02001760 struct worker *collision;
Tejun Heoa62428c2010-06-29 10:07:10 +02001761#ifdef CONFIG_LOCKDEP
1762 /*
1763 * It is permissible to free the struct work_struct from
1764 * inside the function that is called from it, this we need to
1765 * take into account for lockdep too. To avoid bogus "held
1766 * lock freed" warnings as well as problems when looking into
1767 * work->lockdep_map, make a copy and use that here.
1768 */
1769 struct lockdep_map lockdep_map = work->lockdep_map;
1770#endif
Tejun Heo7e116292010-06-29 10:07:13 +02001771 /*
1772 * A single work shouldn't be executed concurrently by
1773 * multiple workers on a single cpu. Check whether anyone is
1774 * already processing the work. If so, defer the work to the
1775 * currently executing one.
1776 */
1777 collision = __find_worker_executing_work(gcwq, bwh, work);
1778 if (unlikely(collision)) {
1779 move_linked_works(work, &collision->scheduled, NULL);
1780 return;
1781 }
1782
Tejun Heoa62428c2010-06-29 10:07:10 +02001783 /* claim and process */
Tejun Heoa62428c2010-06-29 10:07:10 +02001784 debug_work_deactivate(work);
Tejun Heoc8e55f32010-06-29 10:07:12 +02001785 hlist_add_head(&worker->hentry, bwh);
Tejun Heoc34056a2010-06-29 10:07:11 +02001786 worker->current_work = work;
Tejun Heo8cca0ee2010-06-29 10:07:13 +02001787 worker->current_cwq = cwq;
Tejun Heo73f53c42010-06-29 10:07:11 +02001788 work_color = get_work_color(work);
Tejun Heo7a22ad72010-06-29 10:07:13 +02001789
Tejun Heo7a22ad72010-06-29 10:07:13 +02001790 /* record the current cpu number in the work data and dequeue */
1791 set_work_cpu(work, gcwq->cpu);
Tejun Heoa62428c2010-06-29 10:07:10 +02001792 list_del_init(&work->entry);
1793
Tejun Heo649027d2010-06-29 10:07:14 +02001794 /*
1795 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1796 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1797 */
1798 if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1799 struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1800 struct work_struct, entry);
1801
1802 if (!list_empty(&gcwq->worklist) &&
1803 get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1804 wake_up_worker(gcwq);
1805 else
1806 gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1807 }
1808
Tejun Heofb0e7be2010-06-29 10:07:15 +02001809 /*
1810 * CPU intensive works don't participate in concurrency
1811 * management. They're the scheduler's responsibility.
1812 */
1813 if (unlikely(cpu_intensive))
1814 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1815
Tejun Heo8b03ae32010-06-29 10:07:12 +02001816 spin_unlock_irq(&gcwq->lock);
Tejun Heoa62428c2010-06-29 10:07:10 +02001817
Tejun Heoa62428c2010-06-29 10:07:10 +02001818 work_clear_pending(work);
1819 lock_map_acquire(&cwq->wq->lockdep_map);
1820 lock_map_acquire(&lockdep_map);
Arjan van de Vene36c8862010-08-21 13:07:26 -07001821 trace_workqueue_execute_start(work);
Tejun Heoa62428c2010-06-29 10:07:10 +02001822 f(work);
Arjan van de Vene36c8862010-08-21 13:07:26 -07001823 /*
1824 * While we must be careful to not use "work" after this, the trace
1825 * point will only record its address.
1826 */
1827 trace_workqueue_execute_end(work);
Tejun Heoa62428c2010-06-29 10:07:10 +02001828 lock_map_release(&lockdep_map);
1829 lock_map_release(&cwq->wq->lockdep_map);
1830
1831 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1832 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1833 "%s/0x%08x/%d\n",
1834 current->comm, preempt_count(), task_pid_nr(current));
1835 printk(KERN_ERR " last function: ");
1836 print_symbol("%s\n", (unsigned long)f);
1837 debug_show_held_locks(current);
1838 dump_stack();
1839 }
1840
Tejun Heo8b03ae32010-06-29 10:07:12 +02001841 spin_lock_irq(&gcwq->lock);
Tejun Heoa62428c2010-06-29 10:07:10 +02001842
Tejun Heofb0e7be2010-06-29 10:07:15 +02001843 /* clear cpu intensive status */
1844 if (unlikely(cpu_intensive))
1845 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1846
Tejun Heoa62428c2010-06-29 10:07:10 +02001847 /* we're done with it, release */
Tejun Heoc8e55f32010-06-29 10:07:12 +02001848 hlist_del_init(&worker->hentry);
Tejun Heoc34056a2010-06-29 10:07:11 +02001849 worker->current_work = NULL;
Tejun Heo8cca0ee2010-06-29 10:07:13 +02001850 worker->current_cwq = NULL;
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02001851 cwq_dec_nr_in_flight(cwq, work_color, false);
Tejun Heoa62428c2010-06-29 10:07:10 +02001852}
1853
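/*
 * Illustrative sketch, not part of the original file: a work function
 * which hogs the CPU belongs on a WQ_CPU_INTENSIVE workqueue so that,
 * as described above, it is excluded from concurrency management.  All
 * names below are hypothetical and alloc_workqueue() usage is assumed
 * from the workqueue API of this kernel version.
 */
#if 0	/* example only, not compiled */
static struct workqueue_struct *crunch_wq;

static void crunch_fn(struct work_struct *work)
{
	/* long-running, CPU-bound computation */
}
static DECLARE_WORK(crunch_work, crunch_fn);

static int __init crunch_init(void)
{
	crunch_wq = alloc_workqueue("crunch", WQ_CPU_INTENSIVE, 0);
	if (!crunch_wq)
		return -ENOMEM;
	queue_work(crunch_wq, &crunch_work);
	return 0;
}
#endif
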
Tejun Heoaffee4b2010-06-29 10:07:12 +02001854/**
1855 * process_scheduled_works - process scheduled works
1856 * @worker: self
1857 *
1858 * Process all scheduled works. Please note that the scheduled list
1859 * may change while processing a work, so this function repeatedly
1860 * fetches a work from the top and executes it.
1861 *
1862 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02001863 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
Tejun Heoaffee4b2010-06-29 10:07:12 +02001864 * multiple times.
1865 */
1866static void process_scheduled_works(struct worker *worker)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867{
Tejun Heoaffee4b2010-06-29 10:07:12 +02001868 while (!list_empty(&worker->scheduled)) {
1869 struct work_struct *work = list_first_entry(&worker->scheduled,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 struct work_struct, entry);
Tejun Heoc34056a2010-06-29 10:07:11 +02001871 process_one_work(worker, work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873}
1874
Tejun Heo4690c4a2010-06-29 10:07:10 +02001875/**
1876 * worker_thread - the worker thread function
Tejun Heoc34056a2010-06-29 10:07:11 +02001877 * @__worker: self
Tejun Heo4690c4a2010-06-29 10:07:10 +02001878 *
Tejun Heoe22bee72010-06-29 10:07:14 +02001879 * The gcwq worker thread function. There's a single dynamic pool of
 1880 * these per cpu.  These workers process all works regardless of
 1881 * their specific target workqueue.  The only exception is works which
 1882 * belong to workqueues with a rescuer, which is explained in
 1883 * rescuer_thread().
Tejun Heo4690c4a2010-06-29 10:07:10 +02001884 */
Tejun Heoc34056a2010-06-29 10:07:11 +02001885static int worker_thread(void *__worker)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886{
Tejun Heoc34056a2010-06-29 10:07:11 +02001887 struct worker *worker = __worker;
Tejun Heo8b03ae32010-06-29 10:07:12 +02001888 struct global_cwq *gcwq = worker->gcwq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
Tejun Heoe22bee72010-06-29 10:07:14 +02001890 /* tell the scheduler that this is a workqueue worker */
1891 worker->task->flags |= PF_WQ_WORKER;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001892woke_up:
Tejun Heoc8e55f32010-06-29 10:07:12 +02001893 spin_lock_irq(&gcwq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
Tejun Heoc8e55f32010-06-29 10:07:12 +02001895 /* DIE can be set only while we're idle, checking here is enough */
1896 if (worker->flags & WORKER_DIE) {
Tejun Heo8b03ae32010-06-29 10:07:12 +02001897 spin_unlock_irq(&gcwq->lock);
Tejun Heoe22bee72010-06-29 10:07:14 +02001898 worker->task->flags &= ~PF_WQ_WORKER;
Tejun Heoc8e55f32010-06-29 10:07:12 +02001899 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 }
1901
Tejun Heoc8e55f32010-06-29 10:07:12 +02001902 worker_leave_idle(worker);
Tejun Heodb7bccf2010-06-29 10:07:12 +02001903recheck:
Tejun Heoe22bee72010-06-29 10:07:14 +02001904 /* no more worker necessary? */
1905 if (!need_more_worker(gcwq))
1906 goto sleep;
1907
1908 /* do we need to manage? */
1909 if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1910 goto recheck;
1911
Tejun Heoc8e55f32010-06-29 10:07:12 +02001912 /*
1913 * ->scheduled list can only be filled while a worker is
1914 * preparing to process a work or actually processing it.
1915 * Make sure nobody diddled with it while I was sleeping.
1916 */
1917 BUG_ON(!list_empty(&worker->scheduled));
1918
Tejun Heoe22bee72010-06-29 10:07:14 +02001919 /*
1920 * When control reaches this point, we're guaranteed to have
1921 * at least one idle worker or that someone else has already
1922 * assumed the manager role.
1923 */
1924 worker_clr_flags(worker, WORKER_PREP);
1925
1926 do {
Tejun Heoc8e55f32010-06-29 10:07:12 +02001927 struct work_struct *work =
Tejun Heo7e116292010-06-29 10:07:13 +02001928 list_first_entry(&gcwq->worklist,
Tejun Heoc8e55f32010-06-29 10:07:12 +02001929 struct work_struct, entry);
1930
1931 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1932 /* optimization path, not strictly necessary */
1933 process_one_work(worker, work);
1934 if (unlikely(!list_empty(&worker->scheduled)))
1935 process_scheduled_works(worker);
1936 } else {
1937 move_linked_works(work, &worker->scheduled, NULL);
1938 process_scheduled_works(worker);
1939 }
Tejun Heoe22bee72010-06-29 10:07:14 +02001940 } while (keep_working(gcwq));
Tejun Heoc8e55f32010-06-29 10:07:12 +02001941
Tejun Heoe22bee72010-06-29 10:07:14 +02001942 worker_set_flags(worker, WORKER_PREP, false);
Tejun Heod313dd82010-07-02 10:03:51 +02001943sleep:
Tejun Heoe22bee72010-06-29 10:07:14 +02001944 if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1945 goto recheck;
Tejun Heod313dd82010-07-02 10:03:51 +02001946
Tejun Heoc8e55f32010-06-29 10:07:12 +02001947 /*
Tejun Heoe22bee72010-06-29 10:07:14 +02001948 * gcwq->lock is held and there's no work to process and no
1949 * need to manage, sleep. Workers are woken up only while
1950 * holding gcwq->lock or from local cpu, so setting the
1951 * current state before releasing gcwq->lock is enough to
1952 * prevent losing any event.
Tejun Heoc8e55f32010-06-29 10:07:12 +02001953 */
1954 worker_enter_idle(worker);
1955 __set_current_state(TASK_INTERRUPTIBLE);
1956 spin_unlock_irq(&gcwq->lock);
1957 schedule();
1958 goto woke_up;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959}
1960
Tejun Heoe22bee72010-06-29 10:07:14 +02001961/**
1962 * rescuer_thread - the rescuer thread function
1963 * @__wq: the associated workqueue
1964 *
1965 * Workqueue rescuer thread function. There's one rescuer for each
1966 * workqueue which has WQ_RESCUER set.
1967 *
 1968 * Regular work processing on a gcwq may block trying to create a new
 1969 * worker, which uses a GFP_KERNEL allocation and thus has a slight
 1970 * chance of developing into a deadlock if some works currently on the
 1971 * same queue need to be processed to satisfy that allocation.  This is
 1972 * the problem the rescuer solves.
1973 *
 1974 * When such a condition is possible, the gcwq summons the rescuers of
 1975 * all workqueues which have works queued on the gcwq and lets them
 1976 * process those works so that forward progress can be guaranteed.
1977 *
1978 * This should happen rarely.
1979 */
1980static int rescuer_thread(void *__wq)
1981{
1982 struct workqueue_struct *wq = __wq;
1983 struct worker *rescuer = wq->rescuer;
1984 struct list_head *scheduled = &rescuer->scheduled;
Tejun Heof3421792010-07-02 10:03:51 +02001985 bool is_unbound = wq->flags & WQ_UNBOUND;
Tejun Heoe22bee72010-06-29 10:07:14 +02001986 unsigned int cpu;
1987
1988 set_user_nice(current, RESCUER_NICE_LEVEL);
1989repeat:
1990 set_current_state(TASK_INTERRUPTIBLE);
1991
1992 if (kthread_should_stop())
1993 return 0;
1994
Tejun Heof3421792010-07-02 10:03:51 +02001995 /*
 1996 * See whether any cpu is asking for help.  Unbound
 1997 * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
1998 */
Tejun Heof2e005a2010-07-20 15:59:09 +02001999 for_each_mayday_cpu(cpu, wq->mayday_mask) {
Tejun Heof3421792010-07-02 10:03:51 +02002000 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2001 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
Tejun Heoe22bee72010-06-29 10:07:14 +02002002 struct global_cwq *gcwq = cwq->gcwq;
2003 struct work_struct *work, *n;
2004
2005 __set_current_state(TASK_RUNNING);
Tejun Heof2e005a2010-07-20 15:59:09 +02002006 mayday_clear_cpu(cpu, wq->mayday_mask);
Tejun Heoe22bee72010-06-29 10:07:14 +02002007
2008 /* migrate to the target cpu if possible */
2009 rescuer->gcwq = gcwq;
2010 worker_maybe_bind_and_lock(rescuer);
2011
2012 /*
2013 * Slurp in all works issued via this workqueue and
2014 * process'em.
2015 */
2016 BUG_ON(!list_empty(&rescuer->scheduled));
2017 list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2018 if (get_work_cwq(work) == cwq)
2019 move_linked_works(work, scheduled, &n);
2020
2021 process_scheduled_works(rescuer);
2022 spin_unlock_irq(&gcwq->lock);
2023 }
2024
2025 schedule();
2026 goto repeat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027}
2028
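/*
 * Illustrative sketch, not part of the original file: a workqueue which
 * sits in the memory-reclaim path should be created with WQ_RESCUER so
 * the rescuer described above can guarantee forward progress under
 * memory pressure.  All names below are hypothetical.
 */
#if 0	/* example only, not compiled */
static struct workqueue_struct *writeback_wq;

static int __init myfs_init(void)
{
	/* WQ_RESCUER attaches a dedicated rescuer thread to this wq */
	writeback_wq = alloc_workqueue("myfs-writeback", WQ_RESCUER, 0);
	return writeback_wq ? 0 : -ENOMEM;
}
#endif
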
Oleg Nesterovfc2e4d72007-05-09 02:33:51 -07002029struct wq_barrier {
2030 struct work_struct work;
2031 struct completion done;
2032};
2033
2034static void wq_barrier_func(struct work_struct *work)
2035{
2036 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2037 complete(&barr->done);
2038}
2039
Tejun Heo4690c4a2010-06-29 10:07:10 +02002040/**
2041 * insert_wq_barrier - insert a barrier work
2042 * @cwq: cwq to insert barrier into
2043 * @barr: wq_barrier to insert
Tejun Heoaffee4b2010-06-29 10:07:12 +02002044 * @target: target work to attach @barr to
2045 * @worker: worker currently executing @target, NULL if @target is not executing
Tejun Heo4690c4a2010-06-29 10:07:10 +02002046 *
Tejun Heoaffee4b2010-06-29 10:07:12 +02002047 * @barr is linked to @target such that @barr is completed only after
2048 * @target finishes execution. Please note that the ordering
2049 * guarantee is observed only with respect to @target and on the local
2050 * cpu.
2051 *
 2052 * Currently, a queued barrier can't be canceled.  This is because
 2053 * try_to_grab_pending() can't determine whether the work to be
 2054 * grabbed is at the head of the queue and thus can't clear the
 2055 * LINKED flag of the previous work, while there must be a valid next
 2056 * work after a work with the LINKED flag set.
2057 *
2058 * Note that when @worker is non-NULL, @target may be modified
2059 * underneath us, so we can't reliably determine cwq from @target.
Tejun Heo4690c4a2010-06-29 10:07:10 +02002060 *
2061 * CONTEXT:
Tejun Heo8b03ae32010-06-29 10:07:12 +02002062 * spin_lock_irq(gcwq->lock).
Tejun Heo4690c4a2010-06-29 10:07:10 +02002063 */
Oleg Nesterov83c22522007-05-09 02:33:54 -07002064static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
Tejun Heoaffee4b2010-06-29 10:07:12 +02002065 struct wq_barrier *barr,
2066 struct work_struct *target, struct worker *worker)
Oleg Nesterovfc2e4d72007-05-09 02:33:51 -07002067{
Tejun Heoaffee4b2010-06-29 10:07:12 +02002068 struct list_head *head;
2069 unsigned int linked = 0;
2070
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09002071 /*
Tejun Heo8b03ae32010-06-29 10:07:12 +02002072 * debugobject calls are safe here even with gcwq->lock locked
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09002073 * as we know for sure that this will not trigger any of the
2074 * checks and call back into the fixup functions where we
2075 * might deadlock.
2076 */
2077 INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
Tejun Heo22df02b2010-06-29 10:07:10 +02002078 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
Oleg Nesterovfc2e4d72007-05-09 02:33:51 -07002079 init_completion(&barr->done);
Oleg Nesterov83c22522007-05-09 02:33:54 -07002080
Tejun Heoaffee4b2010-06-29 10:07:12 +02002081 /*
2082 * If @target is currently being executed, schedule the
2083 * barrier to the worker; otherwise, put it after @target.
2084 */
2085 if (worker)
2086 head = worker->scheduled.next;
2087 else {
2088 unsigned long *bits = work_data_bits(target);
2089
2090 head = target->entry.next;
2091 /* there can already be other linked works, inherit and set */
2092 linked = *bits & WORK_STRUCT_LINKED;
2093 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2094 }
2095
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09002096 debug_work_activate(&barr->work);
Tejun Heoaffee4b2010-06-29 10:07:12 +02002097 insert_work(cwq, &barr->work, head,
2098 work_color_to_flags(WORK_NO_COLOR) | linked);
Oleg Nesterovfc2e4d72007-05-09 02:33:51 -07002099}
2100
Tejun Heo73f53c42010-06-29 10:07:11 +02002101/**
2102 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2103 * @wq: workqueue being flushed
2104 * @flush_color: new flush color, < 0 for no-op
2105 * @work_color: new work color, < 0 for no-op
2106 *
2107 * Prepare cwqs for workqueue flushing.
2108 *
2109 * If @flush_color is non-negative, flush_color on all cwqs should be
2110 * -1. If no cwq has in-flight commands at the specified color, all
2111 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
 2112 * has in-flight commands, its cwq->flush_color is set to
2113 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2114 * wakeup logic is armed and %true is returned.
2115 *
2116 * The caller should have initialized @wq->first_flusher prior to
2117 * calling this function with non-negative @flush_color. If
2118 * @flush_color is negative, no flush color update is done and %false
2119 * is returned.
2120 *
2121 * If @work_color is non-negative, all cwqs should have the same
2122 * work_color which is previous to @work_color and all will be
2123 * advanced to @work_color.
2124 *
2125 * CONTEXT:
2126 * mutex_lock(wq->flush_mutex).
2127 *
2128 * RETURNS:
2129 * %true if @flush_color >= 0 and there's something to flush. %false
2130 * otherwise.
2131 */
2132static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2133 int flush_color, int work_color)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134{
Tejun Heo73f53c42010-06-29 10:07:11 +02002135 bool wait = false;
2136 unsigned int cpu;
Oleg Nesterov14441962007-05-23 13:57:57 -07002137
Tejun Heo73f53c42010-06-29 10:07:11 +02002138 if (flush_color >= 0) {
2139 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2140 atomic_set(&wq->nr_cwqs_to_flush, 1);
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09002141 }
Oleg Nesterov14441962007-05-23 13:57:57 -07002142
Tejun Heof3421792010-07-02 10:03:51 +02002143 for_each_cwq_cpu(cpu, wq) {
Tejun Heo73f53c42010-06-29 10:07:11 +02002144 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
Tejun Heo8b03ae32010-06-29 10:07:12 +02002145 struct global_cwq *gcwq = cwq->gcwq;
Tejun Heo73f53c42010-06-29 10:07:11 +02002146
Tejun Heo8b03ae32010-06-29 10:07:12 +02002147 spin_lock_irq(&gcwq->lock);
Tejun Heo73f53c42010-06-29 10:07:11 +02002148
2149 if (flush_color >= 0) {
2150 BUG_ON(cwq->flush_color != -1);
2151
2152 if (cwq->nr_in_flight[flush_color]) {
2153 cwq->flush_color = flush_color;
2154 atomic_inc(&wq->nr_cwqs_to_flush);
2155 wait = true;
2156 }
2157 }
2158
2159 if (work_color >= 0) {
2160 BUG_ON(work_color != work_next_color(cwq->work_color));
2161 cwq->work_color = work_color;
2162 }
2163
Tejun Heo8b03ae32010-06-29 10:07:12 +02002164 spin_unlock_irq(&gcwq->lock);
Tejun Heo73f53c42010-06-29 10:07:11 +02002165 }
2166
2167 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2168 complete(&wq->first_flusher->done);
2169
2170 return wait;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171}
2172
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07002173/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 * flush_workqueue - ensure that any scheduled work has run to completion.
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07002175 * @wq: workqueue to flush
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 *
2177 * Forces execution of the workqueue and blocks until its completion.
2178 * This is typically used in driver shutdown handlers.
2179 *
Oleg Nesterovfc2e4d72007-05-09 02:33:51 -07002180 * We sleep until all works which were queued on entry have been handled,
2181 * but we are not livelocked by new incoming ones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002183void flush_workqueue(struct workqueue_struct *wq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184{
Tejun Heo73f53c42010-06-29 10:07:11 +02002185 struct wq_flusher this_flusher = {
2186 .list = LIST_HEAD_INIT(this_flusher.list),
2187 .flush_color = -1,
2188 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2189 };
2190 int next_color;
Oleg Nesterovb1f4ec12007-05-09 02:34:12 -07002191
Ingo Molnar3295f0e2008-08-11 10:30:30 +02002192 lock_map_acquire(&wq->lockdep_map);
2193 lock_map_release(&wq->lockdep_map);
Tejun Heo73f53c42010-06-29 10:07:11 +02002194
2195 mutex_lock(&wq->flush_mutex);
2196
2197 /*
2198 * Start-to-wait phase
2199 */
2200 next_color = work_next_color(wq->work_color);
2201
2202 if (next_color != wq->flush_color) {
2203 /*
2204 * Color space is not full. The current work_color
2205 * becomes our flush_color and work_color is advanced
2206 * by one.
2207 */
2208 BUG_ON(!list_empty(&wq->flusher_overflow));
2209 this_flusher.flush_color = wq->work_color;
2210 wq->work_color = next_color;
2211
2212 if (!wq->first_flusher) {
2213 /* no flush in progress, become the first flusher */
2214 BUG_ON(wq->flush_color != this_flusher.flush_color);
2215
2216 wq->first_flusher = &this_flusher;
2217
2218 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2219 wq->work_color)) {
2220 /* nothing to flush, done */
2221 wq->flush_color = next_color;
2222 wq->first_flusher = NULL;
2223 goto out_unlock;
2224 }
2225 } else {
2226 /* wait in queue */
2227 BUG_ON(wq->flush_color == this_flusher.flush_color);
2228 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2229 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2230 }
2231 } else {
2232 /*
2233 * Oops, color space is full, wait on overflow queue.
2234 * The next flush completion will assign us
2235 * flush_color and transfer to flusher_queue.
2236 */
2237 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2238 }
2239
2240 mutex_unlock(&wq->flush_mutex);
2241
2242 wait_for_completion(&this_flusher.done);
2243
2244 /*
2245 * Wake-up-and-cascade phase
2246 *
2247 * First flushers are responsible for cascading flushes and
2248 * handling overflow. Non-first flushers can simply return.
2249 */
2250 if (wq->first_flusher != &this_flusher)
2251 return;
2252
2253 mutex_lock(&wq->flush_mutex);
2254
Tejun Heo4ce48b32010-07-02 10:03:51 +02002255 /* we might have raced, check again with mutex held */
2256 if (wq->first_flusher != &this_flusher)
2257 goto out_unlock;
2258
Tejun Heo73f53c42010-06-29 10:07:11 +02002259 wq->first_flusher = NULL;
2260
2261 BUG_ON(!list_empty(&this_flusher.list));
2262 BUG_ON(wq->flush_color != this_flusher.flush_color);
2263
2264 while (true) {
2265 struct wq_flusher *next, *tmp;
2266
2267 /* complete all the flushers sharing the current flush color */
2268 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2269 if (next->flush_color != wq->flush_color)
2270 break;
2271 list_del_init(&next->list);
2272 complete(&next->done);
2273 }
2274
2275 BUG_ON(!list_empty(&wq->flusher_overflow) &&
2276 wq->flush_color != work_next_color(wq->work_color));
2277
2278 /* this flush_color is finished, advance by one */
2279 wq->flush_color = work_next_color(wq->flush_color);
2280
2281 /* one color has been freed, handle overflow queue */
2282 if (!list_empty(&wq->flusher_overflow)) {
2283 /*
2284 * Assign the same color to all overflowed
2285 * flushers, advance work_color and append to
2286 * flusher_queue. This is the start-to-wait
2287 * phase for these overflowed flushers.
2288 */
2289 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2290 tmp->flush_color = wq->work_color;
2291
2292 wq->work_color = work_next_color(wq->work_color);
2293
2294 list_splice_tail_init(&wq->flusher_overflow,
2295 &wq->flusher_queue);
2296 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2297 }
2298
2299 if (list_empty(&wq->flusher_queue)) {
2300 BUG_ON(wq->flush_color != wq->work_color);
2301 break;
2302 }
2303
2304 /*
2305 * Need to flush more colors. Make the next flusher
2306 * the new first flusher and arm cwqs.
2307 */
2308 BUG_ON(wq->flush_color == wq->work_color);
2309 BUG_ON(wq->flush_color != next->flush_color);
2310
2311 list_del_init(&next->list);
2312 wq->first_flusher = next;
2313
2314 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2315 break;
2316
2317 /*
2318 * Meh... this color is already done, clear first
2319 * flusher and repeat cascading.
2320 */
2321 wq->first_flusher = NULL;
2322 }
2323
2324out_unlock:
2325 mutex_unlock(&wq->flush_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326}
Dave Jonesae90dd52006-06-30 01:40:45 -04002327EXPORT_SYMBOL_GPL(flush_workqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328
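/*
 * Illustrative sketch, not part of the original file: the typical
 * driver shutdown pattern mentioned above.  All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static struct workqueue_struct *mydrv_wq;

static void mydrv_remove(void)
{
	/* wait for every work queued so far to finish */
	flush_workqueue(mydrv_wq);
	destroy_workqueue(mydrv_wq);
}
#endif
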
Tejun Heobaf59022010-09-16 10:42:16 +02002329static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2330 bool wait_executing)
2331{
2332 struct worker *worker = NULL;
2333 struct global_cwq *gcwq;
2334 struct cpu_workqueue_struct *cwq;
2335
2336 might_sleep();
2337 gcwq = get_work_gcwq(work);
2338 if (!gcwq)
2339 return false;
2340
2341 spin_lock_irq(&gcwq->lock);
2342 if (!list_empty(&work->entry)) {
2343 /*
2344 * See the comment near try_to_grab_pending()->smp_rmb().
2345 * If it was re-queued to a different gcwq under us, we
2346 * are not going to wait.
2347 */
2348 smp_rmb();
2349 cwq = get_work_cwq(work);
2350 if (unlikely(!cwq || gcwq != cwq->gcwq))
2351 goto already_gone;
2352 } else if (wait_executing) {
2353 worker = find_worker_executing_work(gcwq, work);
2354 if (!worker)
2355 goto already_gone;
2356 cwq = worker->current_cwq;
2357 } else
2358 goto already_gone;
2359
2360 insert_wq_barrier(cwq, barr, work, worker);
2361 spin_unlock_irq(&gcwq->lock);
2362
2363 lock_map_acquire(&cwq->wq->lockdep_map);
2364 lock_map_release(&cwq->wq->lockdep_map);
2365 return true;
2366already_gone:
2367 spin_unlock_irq(&gcwq->lock);
2368 return false;
2369}
2370
Oleg Nesterovdb700892008-07-25 01:47:49 -07002371/**
Tejun Heo401a8d02010-09-16 10:36:00 +02002372 * flush_work - wait for a work to finish executing the last queueing instance
2373 * @work: the work to flush
Oleg Nesterovdb700892008-07-25 01:47:49 -07002374 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002375 * Wait until @work has finished execution. This function considers
2376 * only the last queueing instance of @work. If @work has been
2377 * enqueued across different CPUs on a non-reentrant workqueue or on
2378 * multiple workqueues, @work might still be executing on return on
2379 * some of the CPUs from earlier queueing.
Oleg Nesterova67da702008-07-25 01:47:52 -07002380 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002381 * If @work was queued only on a non-reentrant, ordered or unbound
2382 * workqueue, @work is guaranteed to be idle on return if it hasn't
2383 * been requeued since flush started.
2384 *
2385 * RETURNS:
2386 * %true if flush_work() waited for the work to finish execution,
2387 * %false if it was already idle.
Oleg Nesterovdb700892008-07-25 01:47:49 -07002388 */
Tejun Heo401a8d02010-09-16 10:36:00 +02002389bool flush_work(struct work_struct *work)
Oleg Nesterovdb700892008-07-25 01:47:49 -07002390{
Oleg Nesterovdb700892008-07-25 01:47:49 -07002391 struct wq_barrier barr;
2392
Tejun Heobaf59022010-09-16 10:42:16 +02002393 if (start_flush_work(work, &barr, true)) {
2394 wait_for_completion(&barr.done);
2395 destroy_work_on_stack(&barr.work);
2396 return true;
2397 } else
2398 return false;
Oleg Nesterovdb700892008-07-25 01:47:49 -07002399}
2400EXPORT_SYMBOL_GPL(flush_work);
2401
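/*
 * Illustrative sketch, not part of the original file: waiting for the
 * last queueing instance of one specific work item instead of flushing
 * the whole workqueue.  All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void my_fn(struct work_struct *work)
{
	/* ... */
}
static DECLARE_WORK(my_work, my_fn);

static void my_sync_point(void)
{
	if (flush_work(&my_work))
		pr_debug("waited for the last queueing of my_work\n");
}
#endif
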
Tejun Heo401a8d02010-09-16 10:36:00 +02002402static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2403{
2404 struct wq_barrier barr;
2405 struct worker *worker;
2406
2407 spin_lock_irq(&gcwq->lock);
2408
2409 worker = find_worker_executing_work(gcwq, work);
2410 if (unlikely(worker))
2411 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2412
2413 spin_unlock_irq(&gcwq->lock);
2414
2415 if (unlikely(worker)) {
2416 wait_for_completion(&barr.done);
2417 destroy_work_on_stack(&barr.work);
2418 return true;
2419 } else
2420 return false;
2421}
2422
2423static bool wait_on_work(struct work_struct *work)
2424{
2425 bool ret = false;
2426 int cpu;
2427
2428 might_sleep();
2429
2430 lock_map_acquire(&work->lockdep_map);
2431 lock_map_release(&work->lockdep_map);
2432
2433 for_each_gcwq_cpu(cpu)
2434 ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2435 return ret;
2436}
2437
Tejun Heo09383492010-09-16 10:48:29 +02002438/**
2439 * flush_work_sync - wait until a work has finished execution
2440 * @work: the work to flush
2441 *
2442 * Wait until @work has finished execution. On return, it's
2443 * guaranteed that all queueing instances of @work which happened
2444 * before this function is called are finished. In other words, if
2445 * @work hasn't been requeued since this function was called, @work is
2446 * guaranteed to be idle on return.
2447 *
2448 * RETURNS:
2449 * %true if flush_work_sync() waited for the work to finish execution,
2450 * %false if it was already idle.
2451 */
2452bool flush_work_sync(struct work_struct *work)
2453{
2454 struct wq_barrier barr;
2455 bool pending, waited;
2456
2457 /* we'll wait for executions separately, queue barr only if pending */
2458 pending = start_flush_work(work, &barr, false);
2459
2460 /* wait for executions to finish */
2461 waited = wait_on_work(work);
2462
2463 /* wait for the pending one */
2464 if (pending) {
2465 wait_for_completion(&barr.done);
2466 destroy_work_on_stack(&barr.work);
2467 }
2468
2469 return pending || waited;
2470}
2471EXPORT_SYMBOL_GPL(flush_work_sync);
2472
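/*
 * Illustrative sketch, not part of the original file: unlike
 * flush_work(), flush_work_sync() also waits out queueing instances
 * which happened on other CPUs before the call, so it suits works
 * queued from several places.  All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void stats_fn(struct work_struct *work)
{
	/* fold per-cpu counters; may be queued from many paths */
}
static DECLARE_WORK(stats_work, stats_fn);

static void stats_quiesce(void)
{
	/* every queueing which happened before this point is finished */
	flush_work_sync(&stats_work);
}
#endif
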
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002473/*
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002474 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002475 * so this work can't be re-armed in any way.
2476 */
2477static int try_to_grab_pending(struct work_struct *work)
2478{
Tejun Heo8b03ae32010-06-29 10:07:12 +02002479 struct global_cwq *gcwq;
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002480 int ret = -1;
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002481
Tejun Heo22df02b2010-06-29 10:07:10 +02002482 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002483 return 0;
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002484
2485 /*
2486 * The queueing is in progress, or it is already queued. Try to
2487 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2488 */
Tejun Heo7a22ad72010-06-29 10:07:13 +02002489 gcwq = get_work_gcwq(work);
2490 if (!gcwq)
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002491 return ret;
2492
Tejun Heo8b03ae32010-06-29 10:07:12 +02002493 spin_lock_irq(&gcwq->lock);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002494 if (!list_empty(&work->entry)) {
2495 /*
Tejun Heo7a22ad72010-06-29 10:07:13 +02002496 * This work is queued, but perhaps we locked the wrong gcwq.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002497 * In that case we must see the new value after rmb(), see
2498 * insert_work()->wmb().
2499 */
2500 smp_rmb();
Tejun Heo7a22ad72010-06-29 10:07:13 +02002501 if (gcwq == get_work_gcwq(work)) {
Thomas Gleixnerdc186ad2009-11-16 01:09:48 +09002502 debug_work_deactivate(work);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002503 list_del_init(&work->entry);
Tejun Heo7a22ad72010-06-29 10:07:13 +02002504 cwq_dec_nr_in_flight(get_work_cwq(work),
Tejun Heo8a2e8e5d2010-08-25 10:33:56 +02002505 get_work_color(work),
2506 *work_data_bits(work) & WORK_STRUCT_DELAYED);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002507 ret = 1;
2508 }
2509 }
Tejun Heo8b03ae32010-06-29 10:07:12 +02002510 spin_unlock_irq(&gcwq->lock);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002511
2512 return ret;
2513}
2514
Tejun Heo401a8d02010-09-16 10:36:00 +02002515static bool __cancel_work_timer(struct work_struct *work,
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002516 struct timer_list* timer)
2517{
2518 int ret;
2519
2520 do {
2521 ret = (timer && likely(del_timer(timer)));
2522 if (!ret)
2523 ret = try_to_grab_pending(work);
2524 wait_on_work(work);
2525 } while (unlikely(ret < 0));
2526
Tejun Heo7a22ad72010-06-29 10:07:13 +02002527 clear_work_data(work);
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002528 return ret;
2529}
2530
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002531/**
Tejun Heo401a8d02010-09-16 10:36:00 +02002532 * cancel_work_sync - cancel a work and wait for it to finish
2533 * @work: the work to cancel
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002534 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002535 * Cancel @work and wait for its execution to finish. This function
2536 * can be used even if the work re-queues itself or migrates to
2537 * another workqueue. On return from this function, @work is
2538 * guaranteed to be not pending or executing on any CPU.
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002539 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002540 * cancel_work_sync(&delayed_work->work) must not be used for
2541 * delayed_work's. Use cancel_delayed_work_sync() instead.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002542 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002543 * The caller must ensure that the workqueue on which @work was last
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002544 * queued can't be destroyed before this function returns.
Tejun Heo401a8d02010-09-16 10:36:00 +02002545 *
2546 * RETURNS:
2547 * %true if @work was pending, %false otherwise.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002548 */
Tejun Heo401a8d02010-09-16 10:36:00 +02002549bool cancel_work_sync(struct work_struct *work)
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002550{
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002551 return __cancel_work_timer(work, NULL);
Oleg Nesterovb89deed2007-05-09 02:33:52 -07002552}
Oleg Nesterov28e53bd2007-05-09 02:34:22 -07002553EXPORT_SYMBOL_GPL(cancel_work_sync);
Oleg Nesterovb89deed2007-05-09 02:33:52 -07002554
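/*
 * Illustrative sketch, not part of the original file: as documented
 * above, cancel_work_sync() is safe even against a work item which
 * re-queues itself.  All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void requeue_fn(struct work_struct *work)
{
	/* do a chunk, then keep going */
	schedule_work(work);
}
static DECLARE_WORK(requeue_work, requeue_fn);

static void requeue_stop(void)
{
	/* safe even though requeue_fn re-queues itself */
	cancel_work_sync(&requeue_work);
}
#endif
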
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002555/**
Tejun Heo401a8d02010-09-16 10:36:00 +02002556 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2557 * @dwork: the delayed work to flush
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002558 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002559 * Delayed timer is cancelled and the pending work is queued for
2560 * immediate execution. Like flush_work(), this function only
2561 * considers the last queueing instance of @dwork.
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002562 *
Tejun Heo401a8d02010-09-16 10:36:00 +02002563 * RETURNS:
2564 * %true if flush_work() waited for the work to finish execution,
2565 * %false if it was already idle.
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002566 */
Tejun Heo401a8d02010-09-16 10:36:00 +02002567bool flush_delayed_work(struct delayed_work *dwork)
2568{
2569 if (del_timer_sync(&dwork->timer))
2570 __queue_work(raw_smp_processor_id(),
2571 get_work_cwq(&dwork->work)->wq, &dwork->work);
2572 return flush_work(&dwork->work);
2573}
2574EXPORT_SYMBOL(flush_delayed_work);
2575
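/*
 * Illustrative sketch, not part of the original file: flush_delayed_work()
 * pulls a pending delayed work forward and waits for it, which is handy
 * when the result is needed now rather than after the remaining delay.
 * All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void refresh_fn(struct work_struct *work)
{
	/* refresh cached state */
}
static DECLARE_DELAYED_WORK(refresh_dwork, refresh_fn);

static void get_fresh_data(void)
{
	/* if still on the timer, run refresh_fn immediately and wait */
	flush_delayed_work(&refresh_dwork);
}
#endif
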
2576/**
Tejun Heo09383492010-09-16 10:48:29 +02002577 * flush_delayed_work_sync - wait for a dwork to finish
2578 * @dwork: the delayed work to flush
2579 *
2580 * Delayed timer is cancelled and the pending work is queued for
2581 * execution immediately. Other than timer handling, its behavior
2582 * is identical to flush_work_sync().
2583 *
2584 * RETURNS:
2585 * %true if flush_work_sync() waited for the work to finish execution,
2586 * %false if it was already idle.
2587 */
2588bool flush_delayed_work_sync(struct delayed_work *dwork)
2589{
2590 if (del_timer_sync(&dwork->timer))
2591 __queue_work(raw_smp_processor_id(),
2592 get_work_cwq(&dwork->work)->wq, &dwork->work);
2593 return flush_work_sync(&dwork->work);
2594}
2595EXPORT_SYMBOL(flush_delayed_work_sync);
2596
2597/**
Tejun Heo401a8d02010-09-16 10:36:00 +02002598 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 2599 * @dwork: the delayed work to cancel
2600 *
2601 * This is cancel_work_sync() for delayed works.
2602 *
2603 * RETURNS:
2604 * %true if @dwork was pending, %false otherwise.
2605 */
2606bool cancel_delayed_work_sync(struct delayed_work *dwork)
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002607{
Oleg Nesterov1f1f6422007-07-15 23:41:44 -07002608 return __cancel_work_timer(&dwork->work, &dwork->timer);
Oleg Nesterov6e84d642007-05-09 02:34:46 -07002609}
Oleg Nesterovf5a421a2007-07-15 23:41:44 -07002610EXPORT_SYMBOL(cancel_delayed_work_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611
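/*
 * Illustrative sketch, not part of the original file: the usual pairing
 * is schedule_delayed_work() on the arm side and
 * cancel_delayed_work_sync() on the teardown side.  All names are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void timeout_fn(struct work_struct *work)
{
	/* handle command timeout */
}
static DECLARE_DELAYED_WORK(timeout_dwork, timeout_fn);

static void arm_timeout(void)
{
	schedule_delayed_work(&timeout_dwork, msecs_to_jiffies(500));
}

static void cancel_timeout(void)
{
	/* stops the timer and waits out any running instance */
	cancel_delayed_work_sync(&timeout_dwork);
}
#endif
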
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07002612/**
2613 * schedule_work - put work task in global workqueue
2614 * @work: job to be done
2615 *
Bart Van Assche5b0f437d2009-07-30 19:00:53 +02002616 * Returns zero if @work was already on the kernel-global workqueue and
2617 * non-zero otherwise.
2618 *
2619 * This puts a job in the kernel-global workqueue if it was not already
2620 * queued and leaves it in the same position on the kernel-global
2621 * workqueue otherwise.
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07002622 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002623int schedule_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624{
Tejun Heod320c032010-06-29 10:07:14 +02002625 return queue_work(system_wq, work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626}
Dave Jonesae90dd52006-06-30 01:40:45 -04002627EXPORT_SYMBOL(schedule_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628
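/*
 * Illustrative sketch, not part of the original file: deferring work
 * from an interrupt handler to process context via the kernel-global
 * workqueue.  All names are hypothetical and <linux/interrupt.h> is
 * assumed for the irq types.
 */
#if 0	/* example only, not compiled */
struct mydev {
	struct work_struct irq_work;	/* INIT_WORK() done at probe */
};

static void mydev_irq_work(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, irq_work);

	/* process @dev here; sleeping is fine in this context */
}

static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev *dev = data;

	schedule_work(&dev->irq_work);
	return IRQ_HANDLED;
}
#endif
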
Zhang Ruic1a220e2008-07-23 21:28:39 -07002629/**
2630 * schedule_work_on - put work task on a specific cpu
2631 * @cpu: cpu to put the work task on
2632 * @work: job to be done
2633 *
2634 * This puts a job on a specific cpu
2635 */
2636int schedule_work_on(int cpu, struct work_struct *work)
2637{
Tejun Heod320c032010-06-29 10:07:14 +02002638 return queue_work_on(cpu, system_wq, work);
Zhang Ruic1a220e2008-07-23 21:28:39 -07002639}
2640EXPORT_SYMBOL(schedule_work_on);
2641
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07002642/**
2643 * schedule_delayed_work - put work task in global workqueue after delay
David Howells52bad642006-11-22 14:54:01 +00002644 * @dwork: job to be done
2645 * @delay: number of jiffies to wait or 0 for immediate execution
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07002646 *
2647 * After waiting for a given time this puts a job in the kernel-global
2648 * workqueue.
2649 */
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -08002650int schedule_delayed_work(struct delayed_work *dwork,
Ingo Molnar82f67cd2007-02-16 01:28:13 -08002651 unsigned long delay)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652{
Tejun Heod320c032010-06-29 10:07:14 +02002653 return queue_delayed_work(system_wq, dwork, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654}
Dave Jonesae90dd52006-06-30 01:40:45 -04002655EXPORT_SYMBOL(schedule_delayed_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656
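/*
 * Illustrative sketch, not part of the original file: a self-rearming
 * delayed work implements a simple poll loop on the kernel-global
 * workqueue.  All names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* sample hardware state ... */
	schedule_delayed_work(&poll_dwork, HZ);	/* rearm, one second */
}
#endif
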
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07002657/**
2658 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2659 * @cpu: cpu to use
David Howells52bad642006-11-22 14:54:01 +00002660 * @dwork: job to be done
Rolf Eike Beer0fcb78c2006-07-30 03:03:42 -07002661 * @delay: number of jiffies to wait
2662 *
2663 * After waiting for a given time this puts a job in the kernel-global
2664 * workqueue on the specified CPU.
2665 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666int schedule_delayed_work_on(int cpu,
David Howells52bad642006-11-22 14:54:01 +00002667 struct delayed_work *dwork, unsigned long delay)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668{
Tejun Heod320c032010-06-29 10:07:14 +02002669 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670}
Dave Jonesae90dd52006-06-30 01:40:45 -04002671EXPORT_SYMBOL(schedule_delayed_work_on);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672
Andrew Mortonb6136772006-06-25 05:47:49 -07002673/**
2674 * schedule_on_each_cpu - call a function on each online CPU from keventd
2675 * @func: the function to call
Andrew Mortonb6136772006-06-25 05:47:49 -07002676 *
2677 * Returns zero on success.
2678 * Returns -ve errno on failure.
2679 *
Andrew Mortonb6136772006-06-25 05:47:49 -07002680 * schedule_on_each_cpu() is very slow.
2681 */
David Howells65f27f32006-11-22 14:55:48 +00002682int schedule_on_each_cpu(work_func_t func)
Christoph Lameter15316ba2006-01-08 01:00:43 -08002683{
2684 int cpu;
Namhyung Kim38f51562010-08-08 14:24:09 +02002685 struct work_struct __percpu *works;
Christoph Lameter15316ba2006-01-08 01:00:43 -08002686
Andrew Mortonb6136772006-06-25 05:47:49 -07002687 works = alloc_percpu(struct work_struct);
2688 if (!works)
Christoph Lameter15316ba2006-01-08 01:00:43 -08002689 return -ENOMEM;
Andrew Mortonb6136772006-06-25 05:47:49 -07002690
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002691 get_online_cpus();
Tejun Heo93981802009-11-17 14:06:20 -08002692
Christoph Lameter15316ba2006-01-08 01:00:43 -08002693 for_each_online_cpu(cpu) {
Ingo Molnar9bfb1832006-12-18 20:05:09 +01002694 struct work_struct *work = per_cpu_ptr(works, cpu);
2695
2696 INIT_WORK(work, func);
Tejun Heob71ab8c2010-06-29 10:07:14 +02002697 schedule_work_on(cpu, work);
Andi Kleen65a64462009-10-14 06:22:47 +02002698 }
Tejun Heo93981802009-11-17 14:06:20 -08002699
2700 for_each_online_cpu(cpu)
2701 flush_work(per_cpu_ptr(works, cpu));
2702
Gautham R Shenoy95402b32008-01-25 21:08:02 +01002703 put_online_cpus();
Andrew Mortonb6136772006-06-25 05:47:49 -07002704 free_percpu(works);
Christoph Lameter15316ba2006-01-08 01:00:43 -08002705 return 0;
2706}
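
/*
 * Usage sketch (illustrative, not part of the original file): running a
 * handler on every online CPU and waiting for all of them to finish.
 * drain_local_cache() is an invented work function which would operate
 * on this-cpu data.
 *
 *	static void drain_local_cache(struct work_struct *unused)
 *	{
 *		... drain this CPU's cache ...
 *	}
 *
 *	int drain_all_caches(void)
 *	{
 *		return schedule_on_each_cpu(drain_local_cache);
 *	}
 *
 * One work item is queued and flushed per CPU, so the call can block
 * for a long time on a loaded system.
 */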

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
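
/*
 * Deadlock sketch for the warning above (illustrative, not part of the
 * original file; my_mutex and my_work are invented):
 *
 *	mutex_lock(&my_mutex);
 *	flush_scheduled_work();		deadlocks if *any* queued work,
 *					even an unrelated one, needs
 *					my_mutex to make progress
 *	mutex_unlock(&my_mutex);
 *
 * The targeted alternative waits for just one item and cannot trip over
 * unrelated works:
 *
 *	cancel_work_sync(&my_work);
 */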

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
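
/*
 * Usage sketch (illustrative, not part of the original file): releasing
 * an object from a path that may be called in either process or
 * interrupt context.  The execute_work is embedded in the object so the
 * storage outlives the call; struct my_dev and my_dev_release() are
 * invented.
 *
 *	struct my_dev {
 *		struct execute_work ew;
 *		...
 *	};
 *
 *	static void my_dev_release(struct work_struct *work)
 *	{
 *		kfree(container_of(work, struct my_dev, ew.work));
 *	}
 *
 *	void my_dev_free(struct my_dev *dev)
 *	{
 *		execute_in_process_context(my_dev_release, &dev->ew);
 *	}
 */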

int keventd_up(void)
{
	return system_wq != NULL;
}

static int alloc_cwqs(struct workqueue_struct *wq)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
#ifdef CONFIG_SMP
	bool percpu = !(wq->flags & WQ_UNBOUND);
#else
	bool percpu = false;
#endif

	if (percpu)
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
	else {
		void *ptr;

		/*
		 * Allocate enough room to align cwq and put an extra
		 * pointer at the end pointing back to the originally
		 * allocated pointer which will be used for free.
		 */
		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
		if (ptr) {
			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
			*(void **)(wq->cpu_wq.single + 1) = ptr;
		}
	}

	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
	return wq->cpu_wq.v ? 0 : -ENOMEM;
}

static void free_cwqs(struct workqueue_struct *wq)
{
#ifdef CONFIG_SMP
	bool percpu = !(wq->flags & WQ_UNBOUND);
#else
	bool percpu = false;
#endif

	if (percpu)
		free_percpu(wq->cpu_wq.pcpu);
	else if (wq->cpu_wq.single) {
		/* the pointer to free is stored right after the cwq */
		kfree(*(void **)(wq->cpu_wq.single + 1));
	}
}
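
/*
 * The UP branch of alloc_cwqs() uses a generic over-allocate / align /
 * stash-the-cookie pattern.  A standalone sketch of the same idea
 * (illustrative only; not part of the original file):
 *
 *	void *alloc_aligned(size_t size, size_t align)
 *	{
 *		void *ptr, *obj;
 *
 *		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
 *		if (!ptr)
 *			return NULL;
 *		obj = PTR_ALIGN(ptr, align);
 *		*(void **)((char *)obj + size) = ptr;	stash for free
 *		return obj;
 *	}
 *
 *	void free_aligned(void *obj, size_t size)
 *	{
 *		if (obj)
 *			kfree(*(void **)((char *)obj + size));
 *	}
 *
 * size + align + sizeof(void *) bytes always suffice: PTR_ALIGN() skips
 * at most align - 1 bytes and the trailing cookie needs sizeof(void *).
 */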

static int wq_clamp_max_active(int max_active, unsigned int flags,
			       const char *name)
{
	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;

	if (max_active < 1 || max_active > lim)
		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
		       "is out of range, clamping between %d and %d\n",
		       max_active, name, 1, lim);

	return clamp_val(max_active, 1, lim);
}

struct workqueue_struct *__alloc_workqueue_key(const char *name,
					       unsigned int flags,
					       int max_active,
					       struct lock_class_key *key,
					       const char *lock_name)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	/*
	 * Unbound workqueues aren't concurrency managed and should be
	 * dispatched to workers immediately.
	 */
	if (flags & WQ_UNBOUND)
		flags |= WQ_HIGHPRI;

	max_active = max_active ?: WQ_DFL_ACTIVE;
	max_active = wq_clamp_max_active(max_active, flags, name);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	if (alloc_cwqs(wq) < 0)
		goto err;

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->delayed_works);
	}

	if (flags & WQ_RESCUER) {
		struct worker *rescuer;

		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
			goto err;

		wq->rescuer = rescuer = alloc_worker();
		if (!rescuer)
			goto err;

		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
		if (IS_ERR(rescuer->task))
			goto err;

		rescuer->task->flags |= PF_THREAD_BOUND;
		wake_up_process(rescuer->task);
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
		for_each_cwq_cpu(cpu, wq)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	return wq;
err:
	if (wq) {
		free_cwqs(wq);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
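
/*
 * Callers normally reach this through the alloc_workqueue() wrapper,
 * which supplies the lockdep key.  Usage sketch (illustrative, not part
 * of the original file; my_wq is invented):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_driver", WQ_RESCUER, 1);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 * WQ_RESCUER attaches a rescuer thread so the queue can make forward
 * progress under memory pressure; a max_active of 1 allows a single
 * in-flight work per cwq, while 0 would select WQ_DFL_ACTIVE.
 */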

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	wq->flags |= WQ_DYING;
	flush_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	/* sanity check */
	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	if (wq->flags & WQ_RESCUER) {
		kthread_stop(wq->rescuer->task);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
	}

	free_cwqs(wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
	unsigned int cpu;

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	spin_lock(&workqueue_lock);

	wq->saved_max_active = max_active;

	for_each_cwq_cpu(cpu, wq) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		if (!(wq->flags & WQ_FREEZEABLE) ||
		    !(gcwq->flags & GCWQ_FREEZING))
			get_cwq(gcwq->cpu, wq)->max_active = max_active;

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
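
/*
 * Usage sketch (illustrative, not part of the original file): throttling
 * a queue at runtime, e.g. from a sysfs knob (my_wq is invented):
 *
 *	workqueue_set_max_active(my_wq, 1);
 *
 * Works already running are unaffected; excess works simply wait on the
 * delayed list until nr_active drops below the new max_active.
 */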

/**
 * workqueue_congested - test whether a workqueue is congested
 * @cpu: CPU in question
 * @wq: target workqueue
 *
 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
 * no synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 *
 * RETURNS:
 * %true if congested, %false otherwise.
 */
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
{
	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

	return !list_empty(&cwq->delayed_works);
}
EXPORT_SYMBOL_GPL(workqueue_congested);
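
/*
 * Usage sketch (illustrative, not part of the original file): a producer
 * that batches when the local cwq is backed up (invented names):
 *
 *	if (workqueue_congested(raw_smp_processor_id(), my_wq))
 *		add_to_batch(item);
 *	else
 *		queue_work(my_wq, &item->work);
 *
 * The result may already be stale when it is acted upon; correctness
 * must never depend on it.
 */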

/**
 * work_cpu - return the last known associated cpu for @work
 * @work: the work of interest
 *
 * RETURNS:
 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
 */
unsigned int work_cpu(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);

	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
}
EXPORT_SYMBOL_GPL(work_cpu);

/**
 * work_busy - test whether a work is currently pending or running
 * @work: the work to be tested
 *
 * Test whether @work is currently pending or running.  There is no
 * synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 * Especially for reentrant wqs, the pending state might hide the
 * running state.
 *
 * RETURNS:
 * OR'd bitmask of WORK_BUSY_* bits.
 */
unsigned int work_busy(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);
	unsigned long flags;
	unsigned int ret = 0;

	if (!gcwq)
		return false;

	spin_lock_irqsave(&gcwq->lock, flags);

	if (work_pending(work))
		ret |= WORK_BUSY_PENDING;
	if (find_worker_executing_work(gcwq, work))
		ret |= WORK_BUSY_RUNNING;

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
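
/*
 * Usage sketch (illustrative, not part of the original file): work_cpu()
 * and work_busy() are advisory, e.g. for a debug dump (my_work is
 * invented):
 *
 *	unsigned int busy = work_busy(&my_work);
 *
 *	pr_debug("my_work: cpu=%u%s%s\n", work_cpu(&my_work),
 *		 busy & WORK_BUSY_PENDING ? " pending" : "",
 *		 busy & WORK_BUSY_RUNNING ? " running" : "");
 *
 * The snapshot is unsynchronized and may be stale by the time it is
 * printed.
 */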

/*
 * CPU hotplug.
 *
 * There are two challenges in supporting CPU hotplug.  Firstly, there
 * are a lot of assumptions on strong associations among work, cwq and
 * gcwq which make migrating pending and scheduled works very
 * difficult to implement without impacting hot paths.  Secondly,
 * gcwqs serve a mix of short, long and very long running works making
 * blocked draining impractical.
 *
 * This is solved by allowing a gcwq to be detached from CPU, running
 * it with unbound (rogue) workers and allowing it to be reattached
 * later if the cpu comes back online.  A separate thread is created
 * to govern a gcwq in such state and is called the trustee of the
 * gcwq.
 *
 * Trustee states and their descriptions.
 *
 * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
 *		new trustee is started with this state.
 *
 * IN_CHARGE	Once started, trustee will enter this state after
 *		assuming the manager role and making all existing
 *		workers rogue.  DOWN_PREPARE waits for trustee to
 *		enter this state.  After reaching IN_CHARGE, trustee
 *		tries to execute the pending worklist until it's empty
 *		and the state is set to BUTCHER, or the state is set
 *		to RELEASE.
 *
 * BUTCHER	Command state which is set by the cpu callback after
 *		the cpu has gone down.  Once this state is set, the
 *		trustee knows that there will be no new works on the
 *		worklist and once the worklist is empty it can proceed
 *		to killing idle workers.
 *
 * RELEASE	Command state which is set by the cpu callback if the
 *		cpu down has been canceled or it has come online
 *		again.  After recognizing this state, trustee stops
 *		trying to drain or butcher and clears ROGUE, rebinds
 *		all remaining workers back to the cpu and releases
 *		manager role.
 *
 * DONE		Trustee will enter this state after BUTCHER or RELEASE
 *		is complete.
 *
 *          trustee                 CPU                draining
 *          took over               down               complete
 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
 *                        |                     |                  ^
 *                        | CPU is back online  v   return workers |
 *                         ----------------> RELEASE --------------
 */

/**
 * trustee_wait_event_timeout - timed event wait for trustee
 * @cond: condition to wait for
 * @timeout: timeout in jiffies
 *
 * wait_event_timeout() for trustee to use.  Handles locking and
 * checks for RELEASE request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * Positive value indicating the time left if @cond is satisfied, 0 if
 * timed out, -1 if canceled.
 */
#define trustee_wait_event_timeout(cond, timeout) ({			\
	long __ret = (timeout);						\
	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
	       __ret) {							\
		spin_unlock_irq(&gcwq->lock);				\
		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
			__ret);						\
		spin_lock_irq(&gcwq->lock);				\
	}								\
	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
})

/**
 * trustee_wait_event - event wait for trustee
 * @cond: condition to wait for
 *
 * wait_event() for trustee to use.  Automatically handles locking and
 * checks for RELEASE request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * 0 if @cond is satisfied, -1 if canceled.
 */
#define trustee_wait_event(cond) ({					\
	long __ret1;							\
	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
	__ret1 < 0 ? -1 : 0;						\
})

static int __cpuinit trustee_thread(void *__gcwq)
{
	struct global_cwq *gcwq = __gcwq;
	struct worker *worker;
	struct work_struct *work;
	struct hlist_node *pos;
	long rc;
	int i;

	BUG_ON(gcwq->cpu != smp_processor_id());

	spin_lock_irq(&gcwq->lock);
	/*
	 * Claim the manager position and make all workers rogue.
	 * Trustee must be bound to the target cpu and can't be
	 * cancelled.
	 */
	BUG_ON(gcwq->cpu != smp_processor_id());
	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
	BUG_ON(rc < 0);

	gcwq->flags |= GCWQ_MANAGING_WORKERS;

	list_for_each_entry(worker, &gcwq->idle_list, entry)
		worker->flags |= WORKER_ROGUE;

	for_each_busy_worker(worker, i, pos, gcwq)
		worker->flags |= WORKER_ROGUE;

	/*
	 * Call schedule() so that we cross rq->lock and thus can
	 * guarantee sched callbacks see the rogue flag.  This is
	 * necessary as scheduler callbacks may be invoked from other
	 * cpus.
	 */
	spin_unlock_irq(&gcwq->lock);
	schedule();
	spin_lock_irq(&gcwq->lock);

	/*
	 * Sched callbacks are disabled now.  Zap nr_running.  After
	 * this, nr_running stays zero and need_more_worker() and
	 * keep_working() are always true as long as the worklist is
	 * not empty.
	 */
	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);

	spin_unlock_irq(&gcwq->lock);
	del_timer_sync(&gcwq->idle_timer);
	spin_lock_irq(&gcwq->lock);

	/*
	 * We're now in charge.  Notify and proceed to drain.  We need
	 * to keep the gcwq running during the whole CPU down
	 * procedure as other cpu hotunplug callbacks may need to
	 * flush currently running tasks.
	 */
	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
	wake_up_all(&gcwq->trustee_wait);

	/*
	 * The original cpu is in the process of dying and may go away
	 * anytime now.  When that happens, we and all workers would
	 * be migrated to other cpus.  Try draining any remaining work.
	 * We want to get it over with ASAP - spam rescuers, wake up as
	 * many idlers as necessary and create new ones till the
	 * worklist is empty.  Note that if the gcwq is frozen, there
	 * may be frozen works in freezeable cwqs.  Don't declare
	 * completion while frozen.
	 */
	while (gcwq->nr_workers != gcwq->nr_idle ||
	       gcwq->flags & GCWQ_FREEZING ||
	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
		int nr_works = 0;

		list_for_each_entry(work, &gcwq->worklist, entry) {
			send_mayday(work);
			nr_works++;
		}

		list_for_each_entry(worker, &gcwq->idle_list, entry) {
			if (!nr_works--)
				break;
			wake_up_process(worker->task);
		}

		if (need_to_create_worker(gcwq)) {
			spin_unlock_irq(&gcwq->lock);
			worker = create_worker(gcwq, false);
			spin_lock_irq(&gcwq->lock);
			if (worker) {
				worker->flags |= WORKER_ROGUE;
				start_worker(worker);
			}
		}

		/* give a breather */
		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
			break;
	}

	/*
	 * Either all works have been scheduled and cpu is down, or
	 * cpu down has already been canceled.  Wait for and butcher
	 * all workers till we're canceled.
	 */
	do {
		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
		while (!list_empty(&gcwq->idle_list))
			destroy_worker(list_first_entry(&gcwq->idle_list,
							struct worker, entry));
	} while (gcwq->nr_workers && rc >= 0);

	/*
	 * At this point, either draining has completed and no worker
	 * is left, or cpu down has been canceled or the cpu is being
	 * brought back up.  There shouldn't be any idle one left.
	 * Tell the remaining busy ones to rebind once they finish
	 * their currently scheduled works by scheduling rebind_work.
	 */
	WARN_ON(!list_empty(&gcwq->idle_list));

	for_each_busy_worker(worker, i, pos, gcwq) {
		struct work_struct *rebind_work = &worker->rebind_work;

		/*
		 * Rebind_work may race with future cpu hotplug
		 * operations.  Use a separate flag to mark that
		 * rebinding is scheduled.
		 */
		worker->flags |= WORKER_REBIND;
		worker->flags &= ~WORKER_ROGUE;

		/* queue rebind_work, wq doesn't matter, use the default one */
		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				     work_data_bits(rebind_work)))
			continue;

		debug_work_activate(rebind_work);
		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
			    worker->scheduled.next,
			    work_color_to_flags(WORK_NO_COLOR));
	}

	/* relinquish manager role */
	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;

	/* notify completion */
	gcwq->trustee = NULL;
	gcwq->trustee_state = TRUSTEE_DONE;
	wake_up_all(&gcwq->trustee_wait);
	spin_unlock_irq(&gcwq->lock);
	return 0;
}

/**
 * wait_trustee_state - wait for trustee to enter the specified state
 * @gcwq: gcwq the trustee of interest belongs to
 * @state: target state to wait for
 *
 * Wait for the trustee to reach @state.  DONE is already matched.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by cpu_callback.
 */
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	if (!(gcwq->trustee_state == state ||
	      gcwq->trustee_state == TRUSTEE_DONE)) {
		spin_unlock_irq(&gcwq->lock);
		__wait_event(gcwq->trustee_wait,
			     gcwq->trustee_state == state ||
			     gcwq->trustee_state == TRUSTEE_DONE);
		spin_lock_irq(&gcwq->lock);
	}
}

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct task_struct *new_trustee = NULL;
	struct worker *uninitialized_var(new_worker);
	unsigned long flags;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_DOWN_PREPARE:
		new_trustee = kthread_create(trustee_thread, gcwq,
					     "workqueue_trustee/%d\n", cpu);
		if (IS_ERR(new_trustee))
			return notifier_from_errno(PTR_ERR(new_trustee));
		kthread_bind(new_trustee, cpu);
		/* fall through */
	case CPU_UP_PREPARE:
		BUG_ON(gcwq->first_idle);
		new_worker = create_worker(gcwq, false);
		if (!new_worker) {
			if (new_trustee)
				kthread_stop(new_trustee);
			return NOTIFY_BAD;
		}
	}

	/* some are called w/ irq disabled, don't disturb irq status */
	spin_lock_irqsave(&gcwq->lock, flags);

	switch (action) {
	case CPU_DOWN_PREPARE:
		/* initialize trustee and tell it to acquire the gcwq */
		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
		gcwq->trustee = new_trustee;
		gcwq->trustee_state = TRUSTEE_START;
		wake_up_process(gcwq->trustee);
		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
		/* fall through */
	case CPU_UP_PREPARE:
		BUG_ON(gcwq->first_idle);
		gcwq->first_idle = new_worker;
		break;

	case CPU_DYING:
		/*
		 * Before this, the trustee and all workers except for
		 * the ones which are still executing works from
		 * before the last CPU down must be on the cpu.  After
		 * this, they'll all be diasporas.
		 */
		gcwq->flags |= GCWQ_DISASSOCIATED;
		break;

	case CPU_POST_DEAD:
		gcwq->trustee_state = TRUSTEE_BUTCHER;
		/* fall through */
	case CPU_UP_CANCELED:
		destroy_worker(gcwq->first_idle);
		gcwq->first_idle = NULL;
		break;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		gcwq->flags &= ~GCWQ_DISASSOCIATED;
		if (gcwq->trustee_state != TRUSTEE_DONE) {
			gcwq->trustee_state = TRUSTEE_RELEASE;
			wake_up_process(gcwq->trustee);
			wait_trustee_state(gcwq, TRUSTEE_DONE);
		}

		/*
		 * Trustee is done and there might be no worker left.
		 * Put the first_idle in and request a real manager to
		 * take a look.
		 */
		spin_unlock_irq(&gcwq->lock);
		kthread_bind(gcwq->first_idle->task, cpu);
		spin_lock_irq(&gcwq->lock);
		gcwq->flags |= GCWQ_MANAGE_WORKERS;
		start_worker(gcwq->first_idle);
		gcwq->first_idle = NULL;
		break;
	}

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return notifier_from_errno(0);
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
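
/*
 * Usage sketch (illustrative, not part of the original file): reading
 * CPU-local state from the right CPU.  read_local_state() is invented;
 * get_online_cpus() satisfies the requirement that @cpu stay online.
 *
 *	static long read_local_state(void *arg)
 *	{
 *		... runs on the target cpu ...
 *	}
 *
 *	get_online_cpus();
 *	val = work_on_cpu(cpu, read_local_state, NULL);
 *	put_online_cpus();
 */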
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all
 * freezeable workqueues will queue new works to their frozen_works
 * list instead of gcwq->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (cwq && wq->flags & WQ_FREEZEABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}

/**
 * freeze_workqueues_busy - are freezeable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezeable workqueues are still busy.  %false if
 * freezing is complete.
 */
bool freeze_workqueues_busy(void)
{
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_gcwq_cpu(cpu) {
		struct workqueue_struct *wq;
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}

/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective gcwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}

		wake_up_worker(gcwq);

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
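
/*
 * Rough calling sequence for the three freezer hooks above, as used by
 * the PM core during suspend (simplified sketch, not part of the
 * original file; the real caller lives in the process freezer):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy()) {
 *		if (time_after(jiffies, give_up))
 *			goto abort;	some freezeable wq stayed busy
 *		msleep(10);
 *	}
 *	... system sleeps ...
 *	thaw_workqueues();
 */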

static int __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

	/* initialize gcwqs */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		INIT_LIST_HEAD(&gcwq->worklist);
		gcwq->cpu = cpu;
		gcwq->flags |= GCWQ_DISASSOCIATED;

		INIT_LIST_HEAD(&gcwq->idle_list);
		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		init_timer_deferrable(&gcwq->idle_timer);
		gcwq->idle_timer.function = idle_worker_timeout;
		gcwq->idle_timer.data = (unsigned long)gcwq;

		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
			    (unsigned long)gcwq);

		ida_init(&gcwq->worker_ida);

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	/* create the initial worker */
	for_each_online_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;

		if (cpu != WORK_CPU_UNBOUND)
			gcwq->flags &= ~GCWQ_DISASSOCIATED;
		worker = create_worker(gcwq, true);
		BUG_ON(!worker);
		spin_lock_irq(&gcwq->lock);
		start_worker(worker);
		spin_unlock_irq(&gcwq->lock);
	}

	system_wq = alloc_workqueue("events", 0, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
	       !system_unbound_wq);
	return 0;
}
early_initcall(init_workqueues);