diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e3378e8..a64b94e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -23,7 +23,7 @@
  * Please read Documentation/workqueue.txt for details.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -221,7 +221,7 @@ typedef unsigned long mayday_mask_t;
  * per-CPU workqueues:
  */
 struct workqueue_struct {
-       unsigned int            flags;          /* I: WQ_* flags */
+       unsigned int            flags;          /* W: WQ_* flags */
        union {
                struct cpu_workqueue_struct __percpu    *pcpu;
                struct cpu_workqueue_struct             *single;
@@ -240,11 +240,12 @@ struct workqueue_struct {
        mayday_mask_t           mayday_mask;    /* cpus requesting rescue */
        struct worker           *rescuer;       /* I: rescue worker */
 
+       int                     nr_drainers;    /* W: drain in progress */
        int                     saved_max_active; /* W: saved cwq max_active */
-       const char              *name;          /* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
 #endif
+       char                    name[];         /* I: workqueue name */
 };
 
 struct workqueue_struct *system_wq __read_mostly;
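
The workqueue name moves into a flexible array member at the end of the struct, so it is allocated together with the workqueue itself (see the kzalloc(sizeof(*wq) + namelen, ...) hunk further down). A minimal sketch of that idiom, with made-up names not taken from the patch:

    #include <linux/slab.h>
    #include <linux/string.h>

    struct demo {
            int x;
            char name[];                    /* flexible array member, must be last */
    };

    static struct demo *demo_create(const char *src)
    {
            /* a single allocation covers the struct and the trailing string */
            struct demo *d = kzalloc(sizeof(*d) + strlen(src) + 1, GFP_KERNEL);

            if (d)
                    strcpy(d->name, src);
            return d;
    }
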
@@ -252,11 +253,13 @@ struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
 struct workqueue_struct *system_unbound_wq __read_mostly;
 struct workqueue_struct *system_freezable_wq __read_mostly;
+struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
 EXPORT_SYMBOL_GPL(system_freezable_wq);
+EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
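
system_nrt_freezable_wq combines WQ_NON_REENTRANT and WQ_FREEZABLE: its work items won't be executed concurrently on multiple CPUs and the queue is frozen across suspend/resume. An illustrative usage sketch, assuming the matching extern declaration lands in include/linux/workqueue.h (not part of this file's diff); my_work, my_work_fn and my_kick are made-up names:

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* never runs on two CPUs at once; frozen across suspend/resume */
    }

    static DECLARE_WORK(my_work, my_work_fn);

    static void my_kick(void)
    {
            queue_work(system_nrt_freezable_wq, &my_work);
    }
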
@@ -473,13 +476,8 @@ static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
                                            struct workqueue_struct *wq)
 {
        if (!(wq->flags & WQ_UNBOUND)) {
-               if (likely(cpu < nr_cpu_ids)) {
-#ifdef CONFIG_SMP
+               if (likely(cpu < nr_cpu_ids))
                        return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
-#else
-                       return wq->cpu_wq.single;
-#endif
-               }
        } else if (likely(cpu == WORK_CPU_UNBOUND))
                return wq->cpu_wq.single;
        return NULL;
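
The CONFIG_SMP special case can go because per_cpu_ptr() on a percpu allocation is valid on UP as well, where it simply resolves to the single instance; the alloc_cwqs()/free_cwqs() hunks below drop the same #ifdef. A small generic sketch of that pattern (names illustrative):

    #include <linux/percpu.h>

    struct stats {
            unsigned long hits;
    };

    static struct stats __percpu *stats;

    static int stats_init(void)
    {
            int cpu;

            stats = alloc_percpu(struct stats);     /* valid on SMP and UP alike */
            if (!stats)
                    return -ENOMEM;

            /* alloc_percpu() already zeroes; the loop just demonstrates per_cpu_ptr() */
            for_each_possible_cpu(cpu)
                    per_cpu_ptr(stats, cpu)->hits = 0;
            return 0;
    }
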
@@ -990,7 +988,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        debug_work_activate(work);
 
        /* if dying, only works from the same workqueue are allowed */
-       if (unlikely(wq->flags & WQ_DYING) &&
+       if (unlikely(wq->flags & WQ_DRAINING) &&
            WARN_ON_ONCE(!is_chained_work(wq)))
                return;
 
@@ -1143,8 +1141,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                unsigned int lcpu;
 
-               BUG_ON(timer_pending(timer));
-               BUG_ON(!list_empty(&work->entry));
+               WARN_ON_ONCE(timer_pending(timer));
+               WARN_ON_ONCE(!list_empty(&work->entry));
 
                timer_stats_timer_set_start_info(&dwork->timer);
 
@@ -1212,8 +1210,13 @@ static void worker_enter_idle(struct worker *worker)
        } else
                wake_up_all(&gcwq->trustee_wait);
 
-       /* sanity check nr_running */
-       WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
+       /*
+        * Sanity check nr_running.  Because trustee releases gcwq->lock
+        * between setting %WORKER_ROGUE and zapping nr_running, the
+        * warning may trigger spuriously.  Check iff trustee is idle.
+        */
+       WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
+                    gcwq->nr_workers == gcwq->nr_idle &&
                     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
 }
 
@@ -1861,7 +1864,9 @@ __acquires(&gcwq->lock)
 
        spin_unlock_irq(&gcwq->lock);
 
+       smp_wmb();      /* paired with test_and_set_bit(PENDING) */
        work_clear_pending(work);
+
        lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
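
The new smp_wmb() orders the worker's updates to the work item (made while it still owned PENDING) before the clearing of PENDING, and pairs with the full barrier implied by test_and_set_bit() on the queueing side, so whoever sets PENDING next also observes those updates. A minimal, generic sketch of the pairing, not workqueue code; all names and the payload are illustrative:

    #include <linux/bitops.h>

    static unsigned long state = BIT(0);    /* bit 0 set: "pending", as after queueing */
    static int payload;

    static void finish(void)                /* executing side */
    {
            payload = 42;                   /* updates that must be visible ... */
            smp_wmb();                      /* ... before the bit is observed clear */
            clear_bit(0, &state);           /* analogous to work_clear_pending() */
    }

    static void requeue(void)               /* queueing side */
    {
            if (!test_and_set_bit(0, &state)) {     /* implies a full memory barrier */
                    /* finish() cleared the bit, so payload == 42 is guaranteed visible */
                    pr_info("payload=%d\n", payload);
            }
    }
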
@@ -2035,8 +2040,10 @@ static int rescuer_thread(void *__wq)
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
-       if (kthread_should_stop())
+       if (kthread_should_stop()) {
+               __set_current_state(TASK_RUNNING);
                return 0;
+       }
 
        /*
         * See whether any cpu is asking for help.  Unbounded
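
Returning from the rescuer with TASK_INTERRUPTIBLE still set would leave the task marked as sleeping while it keeps running through the kthread exit path; the added __set_current_state(TASK_RUNNING) restores the canonical kthread loop shape. A sketch of that shape; work_available() is a hypothetical predicate, not a kernel API:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static bool work_available(void);       /* hypothetical condition check */

    static int example_thread(void *data)
    {
            for (;;) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (kthread_should_stop()) {
                            __set_current_state(TASK_RUNNING);      /* never return asleep */
                            break;
                    }
                    if (work_available())
                            __set_current_state(TASK_RUNNING);
                    else
                            schedule();     /* actually sleep */
                    /* ... handle available work ... */
            }
            return 0;
    }
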
@@ -2381,6 +2388,59 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * drain_workqueue - drain a workqueue
+ * @wq: workqueue to drain
+ *
+ * Wait until the workqueue becomes empty.  While draining is in progress,
+ * only chain queueing is allowed.  IOW, only currently pending or running
+ * work items on @wq can queue further work items on it.  @wq is flushed
+ * repeatedly until it becomes empty.  The number of flushes is determined
+ * by the depth of chaining and should be relatively short.  Whine if it
+ * takes too long.
+ */
+void drain_workqueue(struct workqueue_struct *wq)
+{
+       unsigned int flush_cnt = 0;
+       unsigned int cpu;
+
+       /*
+        * __queue_work() needs to test whether there are drainers; it is much
+        * hotter than drain_workqueue() and already looks at @wq->flags.
+        * Use WQ_DRAINING so that __queue_work() needn't check nr_drainers.
+        */
+       spin_lock(&workqueue_lock);
+       if (!wq->nr_drainers++)
+               wq->flags |= WQ_DRAINING;
+       spin_unlock(&workqueue_lock);
+reflush:
+       flush_workqueue(wq);
+
+       for_each_cwq_cpu(cpu, wq) {
+               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+               bool drained;
+
+               spin_lock_irq(&cwq->gcwq->lock);
+               drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+               spin_unlock_irq(&cwq->gcwq->lock);
+
+               if (drained)
+                       continue;
+
+               if (++flush_cnt == 10 ||
+                   (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+                       pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
+                                  wq->name, flush_cnt);
+               goto reflush;
+       }
+
+       spin_lock(&workqueue_lock);
+       if (!--wq->nr_drainers)
+               wq->flags &= ~WQ_DRAINING;
+       spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(drain_workqueue);
+
 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                             bool wait_executing)
 {
@@ -2843,13 +2903,8 @@ static int alloc_cwqs(struct workqueue_struct *wq)
        const size_t size = sizeof(struct cpu_workqueue_struct);
        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
                                   __alignof__(unsigned long long));
-#ifdef CONFIG_SMP
-       bool percpu = !(wq->flags & WQ_UNBOUND);
-#else
-       bool percpu = false;
-#endif
 
-       if (percpu)
+       if (!(wq->flags & WQ_UNBOUND))
                wq->cpu_wq.pcpu = __alloc_percpu(size, align);
        else {
                void *ptr;
@@ -2866,22 +2921,14 @@ static int alloc_cwqs(struct workqueue_struct *wq)
                }
        }
 
-       /* just in case, make sure it's actually aligned
-        * - this is affected by PERCPU() alignment in vmlinux.lds.S
-        */
+       /* just in case, make sure it's actually aligned */
        BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
        return wq->cpu_wq.v ? 0 : -ENOMEM;
 }
 
 static void free_cwqs(struct workqueue_struct *wq)
 {
-#ifdef CONFIG_SMP
-       bool percpu = !(wq->flags & WQ_UNBOUND);
-#else
-       bool percpu = false;
-#endif
-
-       if (percpu)
+       if (!(wq->flags & WQ_UNBOUND))
                free_percpu(wq->cpu_wq.pcpu);
        else if (wq->cpu_wq.single) {
                /* the pointer to free is stored right after the cwq */
@@ -2902,14 +2949,29 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
        return clamp_val(max_active, 1, lim);
 }
 
-struct workqueue_struct *__alloc_workqueue_key(const char *name,
+struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
                                               unsigned int flags,
                                               int max_active,
                                               struct lock_class_key *key,
-                                              const char *lock_name)
+                                              const char *lock_name, ...)
 {
+       va_list args, args1;
        struct workqueue_struct *wq;
        unsigned int cpu;
+       size_t namelen;
+
+       /* determine namelen, allocate wq and format name */
+       va_start(args, lock_name);
+       va_copy(args1, args);
+       namelen = vsnprintf(NULL, 0, fmt, args) + 1;
+
+       wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
+       if (!wq)
+               goto err;
+
+       vsnprintf(wq->name, namelen, fmt, args1);
+       va_end(args);
+       va_end(args1);
 
        /*
         * Workqueues which may be used during memory reclaim should
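
With __alloc_workqueue_key() taking printf-style arguments and storing the result in the trailing name[], callers can format the workqueue name in place instead of keeping a separate string, assuming the alloc_workqueue() wrapper macro is made variadic to match (that change is outside this file). A sketch of the resulting usage with made-up names:

    #include <linux/workqueue.h>

    static int mydev_setup(int dev_id)
    {
            struct workqueue_struct *wq;

            /* the name is formatted and stored in the wq's trailing name[] */
            wq = alloc_workqueue("mydev_wq/%d", WQ_MEM_RECLAIM, 1, dev_id);
            if (!wq)
                    return -ENOMEM;

            /* ... remember wq somewhere and use it ... */
            return 0;
    }
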
@@ -2926,12 +2988,9 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                flags |= WQ_HIGHPRI;
 
        max_active = max_active ?: WQ_DFL_ACTIVE;
-       max_active = wq_clamp_max_active(max_active, flags, name);
-
-       wq = kzalloc(sizeof(*wq), GFP_KERNEL);
-       if (!wq)
-               goto err;
+       max_active = wq_clamp_max_active(max_active, flags, wq->name);
 
+       /* init wq */
        wq->flags = flags;
        wq->saved_max_active = max_active;
        mutex_init(&wq->flush_mutex);
@@ -2939,7 +2998,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        INIT_LIST_HEAD(&wq->flusher_queue);
        INIT_LIST_HEAD(&wq->flusher_overflow);
 
-       wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);
 
@@ -2968,7 +3026,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                if (!rescuer)
                        goto err;
 
-               rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
+               rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+                                              wq->name);
                if (IS_ERR(rescuer->task))
                        goto err;
 
@@ -3011,34 +3070,10 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
-       unsigned int flush_cnt = 0;
        unsigned int cpu;
 
-       /*
-        * Mark @wq dying and drain all pending works.  Once WQ_DYING is
-        * set, only chain queueing is allowed.  IOW, only currently
-        * pending or running work items on @wq can queue further work
-        * items on it.  @wq is flushed repeatedly until it becomes empty.
-        * The number of flushing is detemined by the depth of chaining and
-        * should be relatively short.  Whine if it takes too long.
-        */
-       wq->flags |= WQ_DYING;
-reflush:
-       flush_workqueue(wq);
-
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
-               if (!cwq->nr_active && list_empty(&cwq->delayed_works))
-                       continue;
-
-               if (++flush_cnt == 10 ||
-                   (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-                       printk(KERN_WARNING "workqueue %s: flush on "
-                              "destruction isn't complete after %u tries\n",
-                              wq->name, flush_cnt);
-               goto reflush;
-       }
+       /* drain it before proceeding with destruction */
+       drain_workqueue(wq);
 
        /*
         * wq list is used to freeze wq, remove from list after
@@ -3402,14 +3437,17 @@ static int __cpuinit trustee_thread(void *__gcwq)
 
        for_each_busy_worker(worker, i, pos, gcwq) {
                struct work_struct *rebind_work = &worker->rebind_work;
+               unsigned long worker_flags = worker->flags;
 
                /*
                 * Rebind_work may race with future cpu hotplug
                 * operations.  Use a separate flag to mark that
-                * rebinding is scheduled.
+                * rebinding is scheduled.  The morphing should
+                * be atomic.
                 */
-               worker->flags |= WORKER_REBIND;
-               worker->flags &= ~WORKER_ROGUE;
+               worker_flags |= WORKER_REBIND;
+               worker_flags &= ~WORKER_ROGUE;
+               ACCESS_ONCE(worker->flags) = worker_flags;
 
                /* queue rebind_work, wq doesn't matter, use the default one */
                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
@@ -3551,21 +3589,55 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        return notifier_from_errno(0);
 }
 
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered high priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+                                              unsigned long action,
+                                              void *hcpu)
+{
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_UP_PREPARE:
+       case CPU_UP_CANCELED:
+       case CPU_DOWN_FAILED:
+       case CPU_ONLINE:
+               return workqueue_cpu_callback(nfb, action, hcpu);
+       }
+       return NOTIFY_OK;
+}
+
+/*
+ * Workqueues should be brought down after normal priority CPU notifiers.
+ * This will be registered as low priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+                                                unsigned long action,
+                                                void *hcpu)
+{
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DOWN_PREPARE:
+       case CPU_DYING:
+       case CPU_POST_DEAD:
+               return workqueue_cpu_callback(nfb, action, hcpu);
+       }
+       return NOTIFY_OK;
+}
+
 #ifdef CONFIG_SMP
 
 struct work_for_cpu {
-       struct completion completion;
+       struct work_struct work;
        long (*fn)(void *);
        void *arg;
        long ret;
 };
 
-static int do_work_for_cpu(void *_wfc)
+static void work_for_cpu_fn(struct work_struct *work)
 {
-       struct work_for_cpu *wfc = _wfc;
+       struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
+
        wfc->ret = wfc->fn(wfc->arg);
-       complete(&wfc->completion);
-       return 0;
 }
 
 /**
@@ -3580,19 +3652,11 @@ static int do_work_for_cpu(void *_wfc)
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-       struct task_struct *sub_thread;
-       struct work_for_cpu wfc = {
-               .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
-               .fn = fn,
-               .arg = arg,
-       };
+       struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
-       sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
-       if (IS_ERR(sub_thread))
-               return PTR_ERR(sub_thread);
-       kthread_bind(sub_thread, cpu);
-       wake_up_process(sub_thread);
-       wait_for_completion(&wfc.completion);
+       INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+       schedule_work_on(cpu, &wfc.work);
+       flush_work(&wfc.work);
        return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
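
Rebuilding work_on_cpu() on schedule_work_on()/flush_work() drops the per-call kthread while keeping the caller-visible contract: fn runs on the requested CPU and its return value is passed back. A small usage sketch; report_cpu() and demo() are illustrative:

    #include <linux/workqueue.h>
    #include <linux/smp.h>

    static long report_cpu(void *arg)
    {
            /* runs from a work item bound to the requested CPU */
            return (long)raw_smp_processor_id();
    }

    static void demo(void)
    {
            long ret = work_on_cpu(2, report_cpu, NULL);    /* assumes CPU 2 is online */

            pr_info("fn ran on cpu %ld\n", ret);
    }
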
@@ -3744,7 +3808,8 @@ static int __init init_workqueues(void)
        unsigned int cpu;
        int i;
 
-       cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
+       cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
+       cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 
        /* initialize gcwqs */
        for_each_gcwq_cpu(cpu) {
@@ -3793,8 +3858,11 @@ static int __init init_workqueues(void)
                                            WQ_UNBOUND_MAX_ACTIVE);
        system_freezable_wq = alloc_workqueue("events_freezable",
                                              WQ_FREEZABLE, 0);
+       system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
+                       WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
        BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-              !system_unbound_wq || !system_freezable_wq);
+              !system_unbound_wq || !system_freezable_wq ||
+               !system_nrt_freezable_wq);
        return 0;
 }
 early_initcall(init_workqueues);