sched: Enable might_sleep before initializing drivers.
[linux-2.6.git] / kernel / time / tick-common.c
index 019315e..119528d 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
-#include <linux/tick.h>
 
 #include <asm/irq_regs.h>
 
@@ -33,8 +32,8 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
-DEFINE_SPINLOCK(tick_device_lock);
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
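+/*
+ * This lock is taken in atomic context with interrupts disabled, so
+ * it must stay a raw spinlock: it keeps spinning even on preempt-rt,
+ * where ordinary spinlocks become sleeping locks.
+ */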
+static DEFINE_RAW_SPINLOCK(tick_device_lock);
 
 /*
  * Debugging: see timer_list.c
@@ -49,9 +48,13 @@ struct tick_device *tick_get_device(int cpu)
  */
 int tick_is_oneshot_available(void)
 {
-       struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+       struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-       return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+       if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+               return 0;
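+       /*
+        * Devices flagged CLOCK_EVT_FEAT_C3STOP may stop in deep
+        * idle states, so oneshot mode is only usable if the
+        * broadcast device can take over while they are stopped.
+        */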
+       if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+               return 1;
+       return tick_broadcast_oneshot_available();
 }
 
 /*
@@ -93,7 +96,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
        for (;;) {
                if (!clockevents_program_event(dev, next, ktime_get()))
                        return;
-               tick_periodic(cpu);
+               /*
+                * Have to be careful here. If we're in oneshot mode,
+                * before we call tick_periodic() in a loop, we need
+                * to be sure we're using a real hardware clocksource.
+                * Otherwise we could get trapped in an infinite
+                * loop, as tick_periodic() increments jiffies,
+                * which then will increment time, possibly causing
+                * the loop to trigger again and again.
+                */
+               if (timekeeping_valid_for_hres())
+                       tick_periodic(cpu);
                next = ktime_add(next, tick_period);
        }
 }
@@ -109,7 +122,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
        if (!tick_device_is_functional(dev))
                return;
 
-       if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
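+       /*
+        * While the oneshot broadcast logic is active, the device
+        * must not be switched to periodic mode here; keep it on the
+        * broadcast-driven path so the modes stay consistent.
+        */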
+       if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+           !tick_broadcast_oneshot_active()) {
                clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
        } else {
                unsigned long seq;
@@ -135,7 +149,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
  */
 static void tick_setup_device(struct tick_device *td,
                              struct clock_event_device *newdev, int cpu,
-                             const cpumask_t *cpumask)
+                             const struct cpumask *cpumask)
 {
        ktime_t next_event;
        void (*handler)(struct clock_event_device *) = NULL;
@@ -148,7 +162,7 @@ static void tick_setup_device(struct tick_device *td,
                 * If no cpu took the do_timer update, assign it to
                 * this cpu:
                 */
-               if (tick_do_timer_cpu == -1) {
+               if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
                        tick_do_timer_cpu = cpu;
                        tick_next_period = ktime_get();
                        tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -170,8 +184,8 @@ static void tick_setup_device(struct tick_device *td,
         * When the device is not per cpu, pin the interrupt to the
         * current cpu:
         */
-       if (!cpus_equal(newdev->cpumask, *cpumask))
-               irq_set_affinity(newdev->irq, *cpumask);
+       if (!cpumask_equal(newdev->cpumask, cpumask))
+               irq_set_affinity(newdev->irq, cpumask);
 
        /*
         * When global broadcasting is active, check if the current
@@ -198,17 +212,17 @@ static int tick_check_new_device(struct clock_event_device *newdev)
        int cpu, ret = NOTIFY_OK;
        unsigned long flags;
 
-       spin_lock_irqsave(&tick_device_lock, flags);
+       raw_spin_lock_irqsave(&tick_device_lock, flags);
 
        cpu = smp_processor_id();
-       if (!cpu_isset(cpu, newdev->cpumask))
+       if (!cpumask_test_cpu(cpu, newdev->cpumask))
                goto out_bc;
 
        td = &per_cpu(tick_cpu_device, cpu);
        curdev = td->evtdev;
 
        /* cpu local device ? */
-       if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
+       if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
 
                /*
                 * If the cpu affinity of the device interrupt can not
@@ -221,7 +235,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
                 * If we have a cpu local device already, do not replace it
                 * by a non cpu local device
                 */
-               if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
+               if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
                        goto out_bc;
        }
 
@@ -253,11 +267,11 @@ static int tick_check_new_device(struct clock_event_device *newdev)
                curdev = NULL;
        }
        clockevents_exchange_device(curdev, newdev);
-       tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
+       tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
 
-       spin_unlock_irqrestore(&tick_device_lock, flags);
+       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
        return NOTIFY_STOP;
 
 out_bc:
@@ -267,12 +281,27 @@ out_bc:
        if (tick_check_broadcast_device(newdev))
                ret = NOTIFY_STOP;
 
-       spin_unlock_irqrestore(&tick_device_lock, flags);
+       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 
        return ret;
 }
 
 /*
+ * Transfer the do_timer job away from a dying cpu.
+ *
+ * Called with interrupts disabled.
+ */
+static void tick_handover_do_timer(int *cpup)
+{
+       if (*cpup == tick_do_timer_cpu) {
+               int cpu = cpumask_first(cpu_online_mask);
+
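+               /*
+                * Hand the duty to the first remaining online cpu;
+                * if none is left, mark it unassigned with
+                * TICK_DO_TIMER_NONE.
+                */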
+               tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
+                       TICK_DO_TIMER_NONE;
+       }
+}
+
+/*
  * Shutdown an event device on a given cpu:
  *
 * This is called on a live CPU, when a CPU is dead. So we cannot
@@ -285,7 +314,7 @@ static void tick_shutdown(unsigned int *cpup)
        struct clock_event_device *dev = td->evtdev;
        unsigned long flags;
 
-       spin_lock_irqsave(&tick_device_lock, flags);
+       raw_spin_lock_irqsave(&tick_device_lock, flags);
        td->mode = TICKDEV_MODE_PERIODIC;
        if (dev) {
                /*
@@ -296,13 +325,7 @@ static void tick_shutdown(unsigned int *cpup)
                clockevents_exchange_device(dev, NULL);
                td->evtdev = NULL;
        }
-       /* Transfer the do_timer job away from this cpu */
-       if (*cpup == tick_do_timer_cpu) {
-               int cpu = first_cpu(cpu_online_map);
-
-               tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
-       }
-       spin_unlock_irqrestore(&tick_device_lock, flags);
+       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 static void tick_suspend(void)
@@ -310,9 +333,9 @@ static void tick_suspend(void)
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
        unsigned long flags;
 
-       spin_lock_irqsave(&tick_device_lock, flags);
+       raw_spin_lock_irqsave(&tick_device_lock, flags);
        clockevents_shutdown(td->evtdev);
-       spin_unlock_irqrestore(&tick_device_lock, flags);
+       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 static void tick_resume(void)
@@ -321,7 +344,7 @@ static void tick_resume(void)
        unsigned long flags;
        int broadcast = tick_resume_broadcast();
 
-       spin_lock_irqsave(&tick_device_lock, flags);
+       raw_spin_lock_irqsave(&tick_device_lock, flags);
        clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
 
        if (!broadcast) {
@@ -330,7 +353,7 @@ static void tick_resume(void)
                else
                        tick_resume_oneshot();
        }
-       spin_unlock_irqrestore(&tick_device_lock, flags);
+       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
 /*
@@ -355,6 +378,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
                tick_broadcast_oneshot_control(reason);
                break;
 
+       case CLOCK_EVT_NOTIFY_CPU_DYING:
+               tick_handover_do_timer(dev);
+               break;
+
        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                tick_shutdown_broadcast_oneshot(dev);
                tick_shutdown_broadcast(dev);