Merge master.kernel.org:/home/rmk/linux-2.6-arm
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 828e582..dee4865 100644
@@ -9,7 +9,7 @@
  * Derived from the taskqueue/keventd code by:
  *
  *   David Woodhouse <dwmw2@infradead.org>
- *   Andrew Morton <andrewm@uow.edu.au>
+ *   Andrew Morton
  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
  *   Theodore Ts'o <tytso@mit.edu>
  *
@@ -33,6 +33,8 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
 
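[Editor's example — sketch, not part of this patch] Defining CREATE_TRACE_POINTS
before including a trace header makes this one inclusion emit the tracepoint
definitions rather than only their declarations. Such headers are built from
TRACE_EVENT() blocks shaped roughly like the sketch below; the field names are
illustrative, not the literal contents of trace/events/workqueue.h:

	TRACE_EVENT(workqueue_insertion,
		TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
		TP_ARGS(wq_thread, work),
		TP_STRUCT__entry(
			__array(char, thread_comm, TASK_COMM_LEN)
			__field(work_func_t, func)
		),
		TP_fast_assign(
			memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
			__entry->func = work->func;
		),
		TP_printk("thread=%s func=%pf", __entry->thread_comm, __entry->func)
	);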
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -48,8 +50,6 @@ struct cpu_workqueue_struct {
 
        struct workqueue_struct *wq;
        struct task_struct *thread;
-
-       int run_depth;          /* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -62,17 +62,128 @@ struct workqueue_struct {
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
+       int rt;
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
 #endif
 };
 
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+static struct debug_obj_descr work_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int work_fixup_init(void *addr, enum debug_obj_state state)
+{
+       struct work_struct *work = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               cancel_work_sync(work);
+               debug_object_init(work, &work_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int work_fixup_activate(void *addr, enum debug_obj_state state)
+{
+       struct work_struct *work = addr;
+
+       switch (state) {
+
+       case ODEBUG_STATE_NOTAVAILABLE:
+               /*
+                * This is not really a fixup. The work struct was
+                * statically initialized. We just make sure that it
+                * is tracked in the object tracker.
+                */
+               if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
+                       debug_object_init(work, &work_debug_descr);
+                       debug_object_activate(work, &work_debug_descr);
+                       return 0;
+               }
+               WARN_ON_ONCE(1);
+               return 0;
+
+       case ODEBUG_STATE_ACTIVE:
+               WARN_ON(1);
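+               /* fall through */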
+
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int work_fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct work_struct *work = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               cancel_work_sync(work);
+               debug_object_free(work, &work_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static struct debug_obj_descr work_debug_descr = {
+       .name           = "work_struct",
+       .fixup_init     = work_fixup_init,
+       .fixup_activate = work_fixup_activate,
+       .fixup_free     = work_fixup_free,
+};
+
+static inline void debug_work_activate(struct work_struct *work)
+{
+       debug_object_activate(work, &work_debug_descr);
+}
+
+static inline void debug_work_deactivate(struct work_struct *work)
+{
+       debug_object_deactivate(work, &work_debug_descr);
+}
+
+void __init_work(struct work_struct *work, int onstack)
+{
+       if (onstack)
+               debug_object_init_on_stack(work, &work_debug_descr);
+       else
+               debug_object_init(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(__init_work);
+
+void destroy_work_on_stack(struct work_struct *work)
+{
+       debug_object_free(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_work_on_stack);
+
+#else
+static inline void debug_work_activate(struct work_struct *work) { }
+static inline void debug_work_deactivate(struct work_struct *work) { }
+#endif
+
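[Editor's example — sketch, not part of this patch] With CONFIG_DEBUG_OBJECTS_WORK
enabled, an on-stack work item must be set up with INIT_WORK_ON_STACK() and
retired with destroy_work_on_stack(), or the object tracker will flag it. A
minimal hypothetical caller:

	static void example_fn(struct work_struct *work)
	{
		pr_info("on-stack work ran\n");
	}

	static void example(void)
	{
		struct work_struct work;

		INIT_WORK_ON_STACK(&work, example_fn);
		schedule_work(&work);
		flush_work(&work);		/* wait before the stack frame dies */
		destroy_work_on_stack(&work);	/* unregister from debugobjects */
	}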
 /* Serializes the accesses to the list of workqueues. */
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu __read_mostly;
-static cpumask_t cpu_singlethread_map __read_mostly;
+static const struct cpumask *cpu_singlethread_map __read_mostly;
 /*
  * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
  * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
@@ -80,24 +191,24 @@ static cpumask_t cpu_singlethread_map __read_mostly;
  * use cpu_possible_map, the cpumask below is more documentation
  * than optimization.
  */
-static cpumask_t cpu_populated_map __read_mostly;
+static cpumask_var_t cpu_populated_map __read_mostly;
 
 /* If it's single threaded, it isn't in the list of workqueues. */
-static inline int is_single_threaded(struct workqueue_struct *wq)
+static inline int is_wq_single_threaded(struct workqueue_struct *wq)
 {
        return wq->singlethread;
 }
 
-static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
+static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
 {
-       return is_single_threaded(wq)
-               ? &cpu_singlethread_map : &cpu_populated_map;
+       return is_wq_single_threaded(wq)
+               ? cpu_singlethread_map : cpu_populated_map;
 }
 
 static
 struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
 {
-       if (unlikely(is_single_threaded(wq)))
+       if (unlikely(is_wq_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
 }
@@ -127,6 +238,8 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
 {
+       trace_workqueue_insertion(cwq->thread, work);
+
        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
@@ -142,6 +255,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 {
        unsigned long flags;
 
+       debug_work_activate(work);
        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, &cwq->worklist);
        spin_unlock_irqrestore(&cwq->lock, flags);
@@ -159,14 +273,11 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
-       int ret = 0;
+       int ret;
+
+       ret = queue_work_on(get_cpu(), wq, work);
+       put_cpu();
 
-       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
-               BUG_ON(!list_empty(&work->entry));
-               __queue_work(wq_per_cpu(wq, get_cpu()), work);
-               put_cpu();
-               ret = 1;
-       }
        return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work);
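[Editor's example — sketch, not part of this patch] queue_work() now simply
delegates to queue_work_on() for the current CPU, so both entry points share a
single WORK_STRUCT_PENDING test. A hypothetical caller (all names invented):

	static struct workqueue_struct *my_wq;

	static void my_fn(struct work_struct *work)
	{
		pr_debug("my_fn ran\n");
	}
	static DECLARE_WORK(my_work, my_fn);

	static void example_queue(void)
	{
		/* returns 0 if my_work was already pending, nonzero otherwise */
		if (!queue_work(my_wq, &my_work))
			pr_debug("already queued\n");
	}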
@@ -264,13 +375,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
        spin_lock_irq(&cwq->lock);
-       cwq->run_depth++;
-       if (cwq->run_depth > 3) {
-               /* morton gets to eat his hat */
-               printk("%s: recursion depth exceeded: %d\n",
-                       __func__, cwq->run_depth);
-               dump_stack();
-       }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
@@ -286,18 +390,19 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
-
+               trace_workqueue_execution(cwq->thread, work);
+               debug_work_deactivate(work);
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);
 
                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
-               lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-               lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+               lock_map_acquire(&cwq->wq->lockdep_map);
+               lock_map_acquire(&lockdep_map);
                f(work);
-               lock_release(&lockdep_map, 1, _THIS_IP_);
-               lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+               lock_map_release(&lockdep_map);
+               lock_map_release(&cwq->wq->lockdep_map);
 
                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -313,7 +418,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
-       cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
 }
 
@@ -325,8 +429,6 @@ static int worker_thread(void *__cwq)
        if (cwq->wq->freezeable)
                set_freezable();
 
-       set_user_nice(current, -5);
-
        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
@@ -360,38 +462,38 @@ static void wq_barrier_func(struct work_struct *work)
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                        struct wq_barrier *barr, struct list_head *head)
 {
-       INIT_WORK(&barr->work, wq_barrier_func);
+       /*
+        * debugobject calls are safe here even with cwq->lock held, as
+        * we know for sure that this cannot trigger any of the checks
+        * that would call back into the fixup functions, where we might
+        * deadlock.
+        */
+       INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
        init_completion(&barr->done);
 
+       debug_work_activate(&barr->work);
        insert_work(cwq, &barr->work, head);
 }
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-       int active;
+       int active = 0;
+       struct wq_barrier barr;
 
-       if (cwq->thread == current) {
-               /*
-                * Probably keventd trying to flush its own queue. So simply run
-                * it by hand rather than deadlocking.
-                */
-               run_workqueue(cwq);
-               active = 1;
-       } else {
-               struct wq_barrier barr;
+       WARN_ON(cwq->thread == current);
 
-               active = 0;
-               spin_lock_irq(&cwq->lock);
-               if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-                       insert_wq_barrier(cwq, &barr, &cwq->worklist);
-                       active = 1;
-               }
-               spin_unlock_irq(&cwq->lock);
+       spin_lock_irq(&cwq->lock);
+       if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+               insert_wq_barrier(cwq, &barr, &cwq->worklist);
+               active = 1;
+       }
+       spin_unlock_irq(&cwq->lock);
 
-               if (active)
-                       wait_for_completion(&barr.done);
+       if (active) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
        }
 
        return active;
@@ -412,13 +514,13 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void flush_workqueue(struct workqueue_struct *wq)
 {
-       const cpumask_t *cpu_map = wq_cpu_map(wq);
+       const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;
 
        might_sleep();
-       lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&wq->lockdep_map, 1, _THIS_IP_);
-       for_each_cpu_mask_nr(cpu, *cpu_map)
+       lock_map_acquire(&wq->lockdep_map);
+       lock_map_release(&wq->lockdep_map);
+       for_each_cpu(cpu, cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
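[Editor's example — sketch, not part of this patch] flush_workqueue() waits for
every item queued before the call, across all CPUs of the workqueue. Reusing the
hypothetical my_wq/my_work names from the sketch above:

	static void example_teardown(void)
	{
		queue_work(my_wq, &my_work);
		flush_workqueue(my_wq);		/* my_work has finished here */
		destroy_workqueue(my_wq);
	}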
@@ -427,6 +529,8 @@ EXPORT_SYMBOL_GPL(flush_workqueue);
  * flush_work - block until a work_struct's callback has terminated
  * @work: the work which is to be flushed
  *
+ * Returns false if @work has already terminated.
+ *
  * It is expected that, prior to calling flush_work(), the caller has
  * arranged for the work to not be requeued, otherwise it doesn't make
  * sense to use this function.
@@ -442,6 +546,9 @@ int flush_work(struct work_struct *work)
        if (!cwq)
                return 0;
 
+       lock_map_acquire(&cwq->wq->lockdep_map);
+       lock_map_release(&cwq->wq->lockdep_map);
+
        prev = NULL;
        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
@@ -465,6 +572,7 @@ out:
                return 0;
 
        wait_for_completion(&barr.done);
+       destroy_work_on_stack(&barr.work);
        return 1;
 }
 EXPORT_SYMBOL_GPL(flush_work);
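[Editor's example — sketch, not part of this patch] flush_work() waits only for
the last queueing of the item, so the caller must first stop whatever re-arms
it. A hypothetical shutdown path:

	static void example_stop(void)
	{
		disable_my_requeue_source();	/* hypothetical helper */
		if (!flush_work(&my_work))
			pr_debug("work had already terminated\n");
	}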
@@ -499,6 +607,7 @@ static int try_to_grab_pending(struct work_struct *work)
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
+                       debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        ret = 1;
                }
@@ -521,21 +630,23 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
        }
        spin_unlock_irq(&cwq->lock);
 
-       if (unlikely(running))
+       if (unlikely(running)) {
                wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+       }
 }
 
 static void wait_on_work(struct work_struct *work)
 {
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
-       const cpumask_t *cpu_map;
+       const struct cpumask *cpu_map;
        int cpu;
 
        might_sleep();
 
-       lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&work->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
 
        cwq = get_wq_data(work);
        if (!cwq)
@@ -544,7 +655,7 @@ static void wait_on_work(struct work_struct *work)
        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);
 
-       for_each_cpu_mask_nr(cpu, *cpu_map)
+       for_each_cpu(cpu, cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -612,7 +723,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  * schedule_work - put work task in global workqueue
  * @work: job to be done
  *
- * This puts a job in the kernel-global workqueue.
+ * Returns zero if @work was already on the kernel-global workqueue and
+ * non-zero otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
  */
 int schedule_work(struct work_struct *work)
 {
@@ -649,6 +765,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
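[Editor's example — sketch, not part of this patch] Both helpers target the
kernel-global "events" workqueue (keventd_wq below). Hypothetical usage:

	static void my_fn(struct work_struct *work)
	{
		pr_debug("my_fn ran\n");
	}
	static DECLARE_WORK(my_work, my_fn);
	static DECLARE_DELAYED_WORK(my_dwork, my_fn);

	static void example(void)
	{
		schedule_work(&my_work);			/* run soon */
		schedule_delayed_work(&my_dwork, HZ / 2);	/* in ~500 ms */
	}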
 
 /**
+ * flush_delayed_work - block until a delayed_work's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer)) {
+               struct cpu_workqueue_struct *cwq;
+               cwq = wq_per_cpu(keventd_wq, get_cpu());
+               __queue_work(cwq, &dwork->work);
+               put_cpu();
+       }
+       flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
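[Editor's example — sketch, not part of this patch] flush_delayed_work()
collapses a still-pending timer, queues the work immediately, and waits for the
callback. Reusing the hypothetical my_dwork from the sketch above:

	static void example_suspend(void)
	{
		schedule_delayed_work(&my_dwork, 10 * HZ);
		/* later, when the result is needed right away: */
		flush_delayed_work(&my_dwork);	/* callback has finished here */
	}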
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
@@ -676,6 +810,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
+       int orig = -1;
        struct work_struct *works;
 
        works = alloc_percpu(struct work_struct);
@@ -683,15 +818,28 @@ int schedule_on_each_cpu(work_func_t func)
                return -ENOMEM;
 
        get_online_cpus();
+
+       /*
+        * When running in keventd, don't schedule a work item on the
+        * current CPU: the function can be called directly, since this
+        * workqueue is already bound here.  That is also faster.
+        */
+       if (current_is_keventd())
+               orig = raw_smp_processor_id();
+
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
                INIT_WORK(work, func);
-               set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
-               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
+               if (cpu != orig)
+                       schedule_work_on(cpu, work);
        }
+       if (orig >= 0)
+               func(per_cpu_ptr(works, orig));
+
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
+
        put_online_cpus();
        free_percpu(works);
        return 0;
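[Editor's example — sketch, not part of this patch] schedule_on_each_cpu() runs
@func once on every online CPU and returns after all invocations finish; with
this change, the calling CPU's invocation is made directly when the caller is
keventd itself. A hypothetical caller:

	static void drain_local_state(struct work_struct *unused)
	{
		/* executed once on each online CPU */
	}

	static int example(void)
	{
		return schedule_on_each_cpu(drain_local_state);	/* 0 or -ENOMEM */
	}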
@@ -765,8 +913,9 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
 
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct workqueue_struct *wq = cwq->wq;
-       const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
+       const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;
 
        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
@@ -780,9 +929,12 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
         */
        if (IS_ERR(p))
                return PTR_ERR(p);
-
+       if (cwq->wq->rt)
+               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
        cwq->thread = p;
 
+       trace_workqueue_creation(cwq->thread, cpu);
+
        return 0;
 }
 
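[Editor's note — sketch, not part of this patch] The new wq->rt flag promotes
each worker thread to SCHED_FIFO at MAX_RT_PRIO-1 via
sched_setscheduler_nocheck(). In this era it was exposed through a wrapper in
workqueue.h along these lines (verify against the header):

	#define create_rt_workqueue(name) \
		__create_workqueue((name), 0, 0, 1)	/* !singlethread, !freezeable, rt */

	static struct workqueue_struct *rt_wq;

	static int example_init(void)
	{
		rt_wq = create_rt_workqueue("my_rt");
		return rt_wq ? 0 : -ENOMEM;
	}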
@@ -800,6 +952,7 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 struct workqueue_struct *__create_workqueue_key(const char *name,
                                                int singlethread,
                                                int freezeable,
+                                               int rt,
                                                struct lock_class_key *key,
                                                const char *lock_name)
 {
@@ -821,6 +974,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
+       wq->rt = rt;
        INIT_LIST_HEAD(&wq->list);
 
        if (singlethread) {
@@ -829,10 +983,21 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
+               /*
+                * We must place this wq on the list even if the code below
+                * fails.  cpu_down(cpu) can remove cpu from cpu_populated_map
+                * before destroy_workqueue() takes the lock; in that case we
+                * would leak cwq[cpu]->thread.
+                */
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
-
+               /*
+                * We must initialize cwqs for each possible cpu even if we
+                * are going to call destroy_workqueue() in the end. Otherwise
+                * cpu_up() can hit the uninitialized cwq once we drop the
+                * lock.
+                */
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
@@ -860,8 +1025,8 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
        if (cwq->thread == NULL)
                return;
 
-       lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
-       lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+       lock_map_acquire(&cwq->wq->lockdep_map);
+       lock_map_release(&cwq->wq->lockdep_map);
 
        flush_cpu_workqueue(cwq);
        /*
@@ -874,6 +1039,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
+       trace_workqueue_destruction(cwq->thread);
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
 }
@@ -886,7 +1052,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-       const cpumask_t *cpu_map = wq_cpu_map(wq);
+       const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;
 
        cpu_maps_update_begin();
@@ -894,7 +1060,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);
 
-       for_each_cpu_mask_nr(cpu, *cpu_map)
+       for_each_cpu(cpu, cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();
 
@@ -910,14 +1076,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
+       int ret = NOTIFY_OK;
 
        action &= ~CPU_TASKS_FROZEN;
 
        switch (action) {
        case CPU_UP_PREPARE:
-               cpu_set(cpu, cpu_populated_map);
+               cpumask_set_cpu(cpu, cpu_populated_map);
        }
-
+undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 
@@ -927,7 +1094,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
-                       return NOTIFY_BAD;
+                       action = CPU_UP_CANCELED;
+                       ret = NOTIFY_BAD;
+                       goto undo;
 
                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
@@ -944,17 +1113,66 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_POST_DEAD:
-               cpu_clear(cpu, cpu_populated_map);
+               cpumask_clear_cpu(cpu, cpu_populated_map);
        }
 
-       return NOTIFY_OK;
+       return ret;
 }
 
+#ifdef CONFIG_SMP
+
+struct work_for_cpu {
+       struct completion completion;
+       long (*fn)(void *);
+       void *arg;
+       long ret;
+};
+
+static int do_work_for_cpu(void *_wfc)
+{
+       struct work_for_cpu *wfc = _wfc;
+       wfc->ret = wfc->fn(wfc->arg);
+       complete(&wfc->completion);
+       return 0;
+}
+
+/**
+ * work_on_cpu - run a function in process context on a particular cpu
+ * @cpu: the cpu to run on
+ * @fn: the function to run
+ * @arg: the function arg
+ *
+ * This will return the value @fn returns.
+ * It is up to the caller to ensure that the cpu doesn't go offline.
+ * The caller must not hold any locks which would prevent @fn from completing.
+ */
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+       struct task_struct *sub_thread;
+       struct work_for_cpu wfc = {
+               .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+               .fn = fn,
+               .arg = arg,
+       };
+
+       sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+       if (IS_ERR(sub_thread))
+               return PTR_ERR(sub_thread);
+       kthread_bind(sub_thread, cpu);
+       wake_up_process(sub_thread);
+       wait_for_completion(&wfc.completion);
+       return wfc.ret;
+}
+EXPORT_SYMBOL_GPL(work_on_cpu);
+#endif /* CONFIG_SMP */
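[Editor's example — sketch, not part of this patch] work_on_cpu() synchronously
runs @fn on a kthread bound to @cpu and hands back its return value; keeping the
CPU online is the caller's job. A hypothetical user:

	static long get_node(void *unused)
	{
		return numa_node_id();	/* evaluated on the target CPU */
	}

	static long example(unsigned int cpu)
	{
		long nid;

		get_online_cpus();	/* keep @cpu from going offline */
		nid = work_on_cpu(cpu, get_node, NULL);
		put_online_cpus();
		return nid;
	}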
+
 void __init init_workqueues(void)
 {
-       cpu_populated_map = cpu_online_map;
-       singlethread_cpu = first_cpu(cpu_possible_map);
-       cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
+       alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
+
+       cpumask_copy(cpu_populated_map, cpu_online_mask);
+       singlethread_cpu = cpumask_first(cpu_possible_mask);
+       cpu_singlethread_map = cpumask_of(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);