cpu-hotplug: CPUx should be active before it is marked online

diff --git a/kernel/softirq.c b/kernel/softirq.c
index d105a82543d001029218240da411abf4893b587f..fca82c32042b73133f2ab74838287c94cf8ad152 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -24,7 +24,9 @@
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/tick.h>
-#include <trace/irq.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/irq.h>
 
 #include <asm/irq.h>
 /*
@@ -52,11 +54,11 @@ EXPORT_SYMBOL(irq_stat);
 
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
-static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
-       "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
-       "TASKLET", "SCHED", "HRTIMER",  "RCU"
+       "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+       "TASKLET", "SCHED", "HRTIMER", "RCU"
 };
 
 /*
@@ -65,21 +67,31 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static inline void wakeup_softirqd(void)
+static void wakeup_softirqd(void)
 {
        /* Interrupts are disabled: no need to stop preemption */
-       struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+       struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
 }
 
+/*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+ *   softirq processing.
+ * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ *   on local_bh_disable or local_bh_enable.
+ * This lets us distinguish between whether we are currently processing
+ * softirq and whether we just have bh disabled.
+ */
+
 /*
  * This one is for softirq.c-internal use,
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip)
+static void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
        unsigned long flags;
 
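(For reference: the SOFTIRQ_OFFSET / SOFTIRQ_DISABLE_OFFSET split documented in
the comment added above is what lets the rest of the kernel tell "currently
serving a softirq" apart from "bh merely disabled". The corresponding helpers
in mainline <linux/hardirq.h> of this era look roughly like:

	#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
	#define softirq_count()		(preempt_count() & SOFTIRQ_MASK)
	#define in_softirq()		(softirq_count())
	#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)

in_softirq() is true in both states; in_serving_softirq() is true only while
__do_softirq() is actually running handlers.)
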
@@ -93,32 +105,43 @@ static void __local_bh_disable(unsigned long ip)
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
-       preempt_count() += SOFTIRQ_OFFSET;
+       preempt_count() += cnt;
        /*
         * Were softirqs turned off above:
         */
-       if (softirq_count() == SOFTIRQ_OFFSET)
+       if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
 
-       if (preempt_count() == SOFTIRQ_OFFSET)
+       if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 #else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip)
+static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
 {
-       add_preempt_count(SOFTIRQ_OFFSET);
+       add_preempt_count(cnt);
        barrier();
 }
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
 void local_bh_disable(void)
 {
-       __local_bh_disable((unsigned long)__builtin_return_address(0));
+       __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_DISABLE_OFFSET);
 }
 
 EXPORT_SYMBOL(local_bh_disable);
 
+static void __local_bh_enable(unsigned int cnt)
+{
+       WARN_ON_ONCE(in_irq());
+       WARN_ON_ONCE(!irqs_disabled());
+
+       if (softirq_count() == cnt)
+               trace_softirqs_on((unsigned long)__builtin_return_address(0));
+       sub_preempt_count(cnt);
+}
+
 /*
  * Special-case - softirqs can safely be enabled in
  * cond_resched_softirq(), or by __do_softirq(),
@@ -126,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable);
  */
 void _local_bh_enable(void)
 {
-       WARN_ON_ONCE(in_irq());
-       WARN_ON_ONCE(!irqs_disabled());
-
-       if (softirq_count() == SOFTIRQ_OFFSET)
-               trace_softirqs_on((unsigned long)__builtin_return_address(0));
-       sub_preempt_count(SOFTIRQ_OFFSET);
+       __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
 
 EXPORT_SYMBOL(_local_bh_enable);
@@ -145,13 +163,13 @@ static inline void _local_bh_enable_ip(unsigned long ip)
        /*
         * Are softirqs going to be turned on now:
         */
-       if (softirq_count() == SOFTIRQ_OFFSET)
+       if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
-       sub_preempt_count(SOFTIRQ_OFFSET - 1);
+       sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
 
        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();
@@ -186,9 +204,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  */
 #define MAX_SOFTIRQ_RESTART 10
 
-DEFINE_TRACE(softirq_entry);
-DEFINE_TRACE(softirq_exit);
-
 asmlinkage void __do_softirq(void)
 {
        struct softirq_action *h;
@@ -199,7 +214,8 @@ asmlinkage void __do_softirq(void)
        pending = local_softirq_pending();
        account_system_vtime(current);
 
-       __local_bh_disable((unsigned long)__builtin_return_address(0));
+       __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
        lockdep_softirq_enter();
 
        cpu = smp_processor_id();
@@ -213,21 +229,24 @@ restart:
 
        do {
                if (pending & 1) {
+                       unsigned int vec_nr = h - softirq_vec;
                        int prev_count = preempt_count();
 
-                       trace_softirq_entry(h, softirq_vec);
+                       kstat_incr_softirqs_this_cpu(vec_nr);
+
+                       trace_softirq_entry(vec_nr);
                        h->action(h);
-                       trace_softirq_exit(h, softirq_vec);
+                       trace_softirq_exit(vec_nr);
                        if (unlikely(prev_count != preempt_count())) {
-                               printk(KERN_ERR "huh, entered softirq %td %s %p"
+                               printk(KERN_ERR "huh, entered softirq %u %s %p"
                                       "with preempt_count %08x,"
-                                      " exited with %08x?\n", h - softirq_vec,
-                                      softirq_to_name[h - softirq_vec],
-                                      h->action, prev_count, preempt_count());
+                                      " exited with %08x?\n", vec_nr,
+                                      softirq_to_name[vec_nr], h->action,
+                                      prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }
 
-                       rcu_bh_qsctr_inc(cpu);
+                       rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
@@ -245,7 +264,7 @@ restart:
        lockdep_softirq_exit();
 
        account_system_vtime(current);
-       _local_bh_enable();
+       __local_bh_enable(SOFTIRQ_OFFSET);
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -279,16 +298,42 @@ void irq_enter(void)
 
        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
-               __irq_enter();
+               /*
+                * Prevent raise_softirq from needlessly waking up ksoftirqd
+                * here, as softirq will be serviced on return from interrupt.
+                */
+               local_bh_disable();
                tick_check_idle(cpu);
-       } else
-               __irq_enter();
+               _local_bh_enable();
+       }
+
+       __irq_enter();
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
-# define invoke_softirq()      __do_softirq()
+static inline void invoke_softirq(void)
+{
+       if (!force_irqthreads)
+               __do_softirq();
+       else {
+               __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
+               wakeup_softirqd();
+               __local_bh_enable(SOFTIRQ_OFFSET);
+       }
+}
 #else
-# define invoke_softirq()      do_softirq()
+static inline void invoke_softirq(void)
+{
+       if (!force_irqthreads)
+               do_softirq();
+       else {
+               __local_bh_disable((unsigned long)__builtin_return_address(0),
+                               SOFTIRQ_OFFSET);
+               wakeup_softirqd();
+               __local_bh_enable(SOFTIRQ_OFFSET);
+       }
+}
 #endif
 
 /*
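(For reference: force_irqthreads comes from the forced interrupt threading
support; its definition in mainline kernel/irq/manage.c of this era is roughly:

	__read_mostly bool force_irqthreads;

	static int __init setup_forced_irqthreads(char *arg)
	{
		force_irqthreads = true;
		return 0;
	}
	early_param("threadirqs", setup_forced_irqthreads);

so when the kernel is booted with "threadirqs", invoke_softirq() only wakes
ksoftirqd instead of running the pending softirqs inline on irq exit.)
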
@@ -302,9 +347,9 @@ void irq_exit(void)
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
 
+       rcu_irq_exit();
 #ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
-       rcu_irq_exit();
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
 #endif
@@ -345,7 +390,9 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
        softirq_vec[nr].action = action;
 }
 
-/* Tasklets */
+/*
+ * Tasklets
+ */
 struct tasklet_head
 {
        struct tasklet_struct *head;
@@ -361,8 +408,8 @@ void __tasklet_schedule(struct tasklet_struct *t)
 
        local_irq_save(flags);
        t->next = NULL;
-       *__get_cpu_var(tasklet_vec).tail = t;
-       __get_cpu_var(tasklet_vec).tail = &(t->next);
+       *__this_cpu_read(tasklet_vec.tail) = t;
+       __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
 }
@@ -375,22 +422,33 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
        local_irq_save(flags);
        t->next = NULL;
-       *__get_cpu_var(tasklet_hi_vec).tail = t;
-       __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+       *__this_cpu_read(tasklet_hi_vec.tail) = t;
+       __this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
 }
 
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
+void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+       BUG_ON(!irqs_disabled());
+
+       t->next = __this_cpu_read(tasklet_hi_vec.head);
+       __this_cpu_write(tasklet_hi_vec.head, t);
+       __raise_softirq_irqoff(HI_SOFTIRQ);
+}
+
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
 static void tasklet_action(struct softirq_action *a)
 {
        struct tasklet_struct *list;
 
        local_irq_disable();
-       list = __get_cpu_var(tasklet_vec).head;
-       __get_cpu_var(tasklet_vec).head = NULL;
-       __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+       list = __this_cpu_read(tasklet_vec.head);
+       __this_cpu_write(tasklet_vec.head, NULL);
+       __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();
 
        while (list) {
@@ -411,8 +469,8 @@ static void tasklet_action(struct softirq_action *a)
 
                local_irq_disable();
                t->next = NULL;
-               *__get_cpu_var(tasklet_vec).tail = t;
-               __get_cpu_var(tasklet_vec).tail = &(t->next);
+               *__this_cpu_read(tasklet_vec.tail) = t;
+               __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
@@ -423,9 +481,9 @@ static void tasklet_hi_action(struct softirq_action *a)
        struct tasklet_struct *list;
 
        local_irq_disable();
-       list = __get_cpu_var(tasklet_hi_vec).head;
-       __get_cpu_var(tasklet_hi_vec).head = NULL;
-       __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+       list = __this_cpu_read(tasklet_hi_vec.head);
+       __this_cpu_write(tasklet_hi_vec.head, NULL);
+       __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();
 
        while (list) {
@@ -446,8 +504,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
                local_irq_disable();
                t->next = NULL;
-               *__get_cpu_var(tasklet_hi_vec).tail = t;
-               __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+               *__this_cpu_read(tasklet_hi_vec.tail) = t;
+               __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
@@ -472,9 +530,9 @@ void tasklet_kill(struct tasklet_struct *t)
                printk("Attempt to kill tasklet from interrupt\n");
 
        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-               do
+               do {
                        yield();
-               while (test_bit(TASKLET_STATE_SCHED, &t->state));
+               } while (test_bit(TASKLET_STATE_SCHED, &t->state));

        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
@@ -482,6 +540,61 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+/*
+ * tasklet_hrtimer
+ */
+
+/*
+ * The trampoline is called when the hrtimer expires. It schedules a tasklet
+ * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
+ * hrtimer callback, but from softirq context.
+ */
+static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
+{
+       struct tasklet_hrtimer *ttimer =
+               container_of(timer, struct tasklet_hrtimer, timer);
+
+       tasklet_hi_schedule(&ttimer->tasklet);
+       return HRTIMER_NORESTART;
+}
+
+/*
+ * Helper function which calls the hrtimer callback from
+ * tasklet/softirq context
+ */
+static void __tasklet_hrtimer_trampoline(unsigned long data)
+{
+       struct tasklet_hrtimer *ttimer = (void *)data;
+       enum hrtimer_restart restart;
+
+       restart = ttimer->function(&ttimer->timer);
+       if (restart != HRTIMER_NORESTART)
+               hrtimer_restart(&ttimer->timer);
+}
+
+/**
+ * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
+ * @ttimer:     tasklet_hrtimer which is initialized
+ * @function:   hrtimer callback function which gets called from softirq context
+ * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
+ * @mode:       hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
+ */
+void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+                         enum hrtimer_restart (*function)(struct hrtimer *),
+                         clockid_t which_clock, enum hrtimer_mode mode)
+{
+       hrtimer_init(&ttimer->timer, which_clock, mode);
+       ttimer->timer.function = __hrtimer_tasklet_trampoline;
+       tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
+                    (unsigned long)ttimer);
+       ttimer->function = function;
+}
+EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
+
+/*
+ * Remote softirq bits
+ */
+
 DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 EXPORT_PER_CPU_SYMBOL(softirq_work_list);
 
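(A hypothetical usage sketch of the tasklet_hrtimer API added above; the my_*
names and period_ns value are illustrative only, not part of this patch:

	static struct tasklet_hrtimer my_th;

	static enum hrtimer_restart my_cb(struct hrtimer *timer)
	{
		/* Runs from tasklet (softirq) context via the trampolines. */
		return HRTIMER_NORESTART;
	}

	static int my_init(void)	/* e.g. a driver init path */
	{
		unsigned long period_ns = 1000000;	/* 1 ms, arbitrary */

		tasklet_hrtimer_init(&my_th, my_cb, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
		tasklet_hrtimer_start(&my_th, ktime_set(0, period_ns),
				      HRTIMER_MODE_REL);
		return 0;
	}

tasklet_hrtimer_start() is the <linux/interrupt.h> helper that forwards to
hrtimer_start() on &ttimer->timer.)
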
@@ -624,7 +737,7 @@ void __init softirq_init(void)
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-static int ksoftirqd(void * __bind_cpu)
+static int run_ksoftirqd(void * __bind_cpu)
 {
        set_current_state(TASK_INTERRUPTIBLE);
 
@@ -644,11 +757,14 @@ static int ksoftirqd(void * __bind_cpu)
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
-                       do_softirq();
+                       local_irq_disable();
+                       if (local_softirq_pending())
+                               __do_softirq();
+                       local_irq_enable();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
-                       rcu_qsctr_inc((long)__bind_cpu);
+                       rcu_note_context_switch((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
@@ -708,16 +824,16 @@ static void takeover_tasklets(unsigned int cpu)
 
        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-               *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-               __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+               *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+               this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-               *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-               __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+               *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+               __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
@@ -737,10 +853,13 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
+               p = kthread_create_on_node(run_ksoftirqd,
+                                          hcpu,
+                                          cpu_to_node(hotcpu),
+                                          "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
-                       return NOTIFY_BAD;
+                       return notifier_from_errno(PTR_ERR(p));
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
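(For reference: notifier_from_errno(), from <linux/notifier.h>, folds the errno
into the notifier return value so the failure is still visible to callers; it
is roughly:

	static inline int notifier_from_errno(int err)
	{
		if (err)
			return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
		return NOTIFY_OK;
	}

which is why the BUG_ON() in spawn_ksoftirqd() below now checks
err != NOTIFY_OK rather than err == NOTIFY_BAD.)
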
@@ -759,7 +878,9 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
-               struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+               static const struct sched_param param = {
+                       .sched_priority = MAX_RT_PRIO-1
+               };
 
                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
@@ -782,32 +903,13 @@ static __init int spawn_ksoftirqd(void)
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
 
-       BUG_ON(err == NOTIFY_BAD);
+       BUG_ON(err != NOTIFY_OK);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
 }
 early_initcall(spawn_ksoftirqd);
 
-#ifdef CONFIG_SMP
-/*
- * Call a function on all processors
- */
-int on_each_cpu(void (*func) (void *info), void *info, int wait)
-{
-       int ret = 0;
-
-       preempt_disable();
-       ret = smp_call_function(func, info, wait);
-       local_irq_disable();
-       func(info);
-       local_irq_enable();
-       preempt_enable();
-       return ret;
-}
-EXPORT_SYMBOL(on_each_cpu);
-#endif
-
 /*
  * [ These __weak aliases are kept in a separate compilation unit, so that
  *   GCC does not inline them incorrectly. ]
@@ -818,17 +920,14 @@ int __init __weak early_irq_init(void)
        return 0;
 }
 
+#ifdef CONFIG_GENERIC_HARDIRQS
 int __init __weak arch_probe_nr_irqs(void)
 {
-       return 0;
+       return NR_IRQS_LEGACY;
 }
 
 int __init __weak arch_early_irq_init(void)
 {
        return 0;
 }
-
-int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
-{
-       return 0;
-}
+#endif