kernel/kthread.c [linux-2.6.git]
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/preempt.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	int should_stop;
	void *data;
	struct completion exited;
};

#define to_kthread(tsk) \
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
	return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);
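/*
 * Usage sketch (not part of this file): a minimal thread function that
 * exits cleanly when kthread_stop() is called.  Wrapped in #if 0 so it is
 * never built; names prefixed "example_" are hypothetical.
 */
#if 0
static int example_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work here ... */
		schedule_timeout_interruptible(HZ);
	}
	/* This value is handed back to the kthread_stop() caller. */
	return 0;
}
#endif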

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
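/*
 * Usage sketch (not part of this file): the main loop of a freezable
 * kthread.  set_freezable() plus kthread_freezable_should_stop() replaces
 * an open-coded try_to_freeze()/kthread_should_stop() pair and avoids the
 * freezer vs. kthread_stop() deadlock mentioned above.  Wrapped in #if 0;
 * "example_" names are hypothetical.
 */
#if 0
static int example_freezable_thread_fn(void *data)
{
	bool was_frozen;

	set_freezable();	/* kthreads are created non-freezable */
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			continue;	/* e.g. re-check state after a freeze cycle */
		/* do one unit of work here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif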

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.should_stop = 0;
	self.data = data;
	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;

	/*
	 * Disable preemption so we enter TASK_UNINTERRUPTIBLE after
	 * complete() instead of possibly being preempted. This speeds
	 * up clients that do a kthread_bind() directly after
	 * creation.
	 */
	preempt_disable();
	complete(&create->done);
	preempt_enable_no_resched();

	schedule();

	ret = -EINTR;
	if (!self.should_stop)
		ret = threadfn(data);

	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack; otherwise give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
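/*
 * Usage sketch (not part of this file): creating and starting a thread.
 * kthread_create() and kthread_run() are convenience wrappers from
 * <linux/kthread.h> that pass -1 for @node.  Wrapped in #if 0; "example_"
 * names are hypothetical.
 */
#if 0
static int example_start(void *data)
{
	struct task_struct *tsk;

	tsk = kthread_create(example_thread_fn, data, "example/%d", 0);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	/* The new thread sleeps in TASK_UNINTERRUPTIBLE until woken. */
	wake_up_process(tsk);
	return 0;
}
#endif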

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
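/*
 * Usage sketch (not part of this file): a per-cpu thread must be bound
 * between kthread_create() and the first wake_up_process(); once the
 * thread has run, kthread_bind() would hit the wait_task_inactive()
 * warning above.  Wrapped in #if 0; "example_" names are hypothetical.
 */
#if 0
static int example_start_on_cpu(void *data, unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create(example_thread_fn, data, "example/%u", cpu);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	kthread_bind(tsk, cpu);	/* @cpu must be possible, need not be online */
	wake_up_process(tsk);
	return 0;
}
#endif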

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);
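/*
 * Usage sketch (not part of this file): the usual create/run/stop pairing,
 * using the kthread_run() wrapper from <linux/kthread.h>.  Wrapped in
 * #if 0; "example_" names are hypothetical.
 */
#if 0
static struct task_struct *example_task;

static int example_init(void)
{
	example_task = kthread_run(example_thread_fn, NULL, "example");
	return IS_ERR(example_task) ? PTR_ERR(example_task) : 0;
}

static void example_exit(void)
{
	/* Wakes the thread, waits for it to exit, returns its exit code. */
	kthread_stop(example_task);
}
#endif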

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
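/*
 * Usage sketch (not part of this file): attaching a kthread to a
 * kthread_worker.  DEFINE_KTHREAD_WORKER() comes from <linux/kthread.h>.
 * Wrapped in #if 0; "example_" names are hypothetical.
 */
#if 0
static DEFINE_KTHREAD_WORKER(example_worker);
static struct task_struct *example_worker_task;

static int example_worker_start(void)
{
	example_worker_task = kthread_run(kthread_worker_fn, &example_worker,
					  "example_worker");
	return IS_ERR(example_worker_task) ?
		PTR_ERR(example_worker_task) : 0;
}
#endif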

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
			       struct kthread_work *work,
			       struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to be processed asynchronously by @worker.  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
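/*
 * Usage sketch (not part of this file): defining a work item and queueing
 * it on the worker from the previous sketch.  DEFINE_KTHREAD_WORK() comes
 * from <linux/kthread.h>.  Wrapped in #if 0; "example_" names are
 * hypothetical.
 */
#if 0
static void example_work_fn(struct kthread_work *work)
{
	/* runs in the context of the kthread driving example_worker */
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static void example_kick(void)
{
	/* returns false if example_work is still pending from a previous kick */
	queue_kthread_work(&example_worker, &example_work);
}
#endif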

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
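/*
 * Usage sketch (not part of this file): orderly teardown of the worker
 * from the sketches above - drain queued work, then stop the thread
 * driving it.  Wrapped in #if 0; "example_" names are hypothetical.
 */
#if 0
static void example_worker_stop(void)
{
	flush_kthread_worker(&example_worker);
	kthread_stop(example_worker_task);
}
#endif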