perf_events, x86: Implement intel core solo/duo support
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index e2f91ec..0086628 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -48,6 +48,8 @@
 
 #include <asm/uaccess.h>
 
+#include <trace/events/timer.h>
+
 /*
  * The timer bases:
  *
@@ -125,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
-                       spin_lock_irqsave(&base->cpu_base->lock, *flags);
+                       raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU: */
-                       spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+                       raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
                }
                cpu_relax();
        }
@@ -206,13 +208,13 @@ again:
 
                /* See the comment in lock_timer_base() */
                timer->base = NULL;
-               spin_unlock(&base->cpu_base->lock);
-               spin_lock(&new_base->cpu_base->lock);
+               raw_spin_unlock(&base->cpu_base->lock);
+               raw_spin_lock(&new_base->cpu_base->lock);
 
                if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
                        cpu = this_cpu;
-                       spin_unlock(&new_base->cpu_base->lock);
-                       spin_lock(&base->cpu_base->lock);
+                       raw_spin_unlock(&new_base->cpu_base->lock);
+                       raw_spin_lock(&base->cpu_base->lock);
                        timer->base = base;
                        goto again;
                }
@@ -228,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
        struct hrtimer_clock_base *base = timer->base;
 
-       spin_lock_irqsave(&base->cpu_base->lock, *flags);
+       raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 
        return base;
 }
@@ -429,6 +431,7 @@ void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
        debug_object_init_on_stack(timer, &hrtimer_debug_descr);
        __hrtimer_init(timer, clock_id, mode);
 }
+EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
 
 void destroy_hrtimer_on_stack(struct hrtimer *timer)
 {
@@ -441,6 +444,26 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
 
+static inline void
+debug_init(struct hrtimer *timer, clockid_t clockid,
+          enum hrtimer_mode mode)
+{
+       debug_hrtimer_init(timer);
+       trace_hrtimer_init(timer, clockid, mode);
+}
+
+static inline void debug_activate(struct hrtimer *timer)
+{
+       debug_hrtimer_activate(timer);
+       trace_hrtimer_start(timer);
+}
+
+static inline void debug_deactivate(struct hrtimer *timer)
+{
+       debug_hrtimer_deactivate(timer);
+       trace_hrtimer_cancel(timer);
+}
+
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -486,13 +509,14 @@ static inline int hrtimer_hres_active(void)
  * next event
  * Called with interrupts disabled and base->lock held
  */
-static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
+static void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
        int i;
        struct hrtimer_clock_base *base = cpu_base->clock_base;
-       ktime_t expires;
+       ktime_t expires, expires_next;
 
-       cpu_base->expires_next.tv64 = KTIME_MAX;
+       expires_next.tv64 = KTIME_MAX;
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                struct hrtimer *timer;
@@ -508,10 +532,15 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
                 */
                if (expires.tv64 < 0)
                        expires.tv64 = 0;
-               if (expires.tv64 < cpu_base->expires_next.tv64)
-                       cpu_base->expires_next = expires;
+               if (expires.tv64 < expires_next.tv64)
+                       expires_next = expires;
        }
 
+       if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
+               return;
+
+       cpu_base->expires_next.tv64 = expires_next.tv64;
+
        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
 }
@@ -528,7 +557,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 static int hrtimer_reprogram(struct hrtimer *timer,
                             struct hrtimer_clock_base *base)
 {
-       ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
+       struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
        int res;
 
@@ -553,7 +582,16 @@ static int hrtimer_reprogram(struct hrtimer *timer,
        if (expires.tv64 < 0)
                return -ETIME;
 
-       if (expires.tv64 >= expires_next->tv64)
+       if (expires.tv64 >= cpu_base->expires_next.tv64)
+               return 0;
+
+       /*
+        * If a hang was detected in the last timer interrupt then we
+        * do not schedule a timer which is earlier than the expiry
+        * which we enforced in the hang detection. We want the system
+        * to make progress.
+        */
+       if (cpu_base->hang_detected)
                return 0;
 
        /*
@@ -561,7 +599,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
         */
        res = tick_program_event(expires, 0);
        if (!IS_ERR_VALUE(res))
-               *expires_next = expires;
+               cpu_base->expires_next = expires;
        return res;
 }
 
@@ -590,12 +628,12 @@ static void retrigger_next_event(void *arg)
        base = &__get_cpu_var(hrtimer_bases);
 
        /* Adjust CLOCK_REALTIME offset */
-       spin_lock(&base->lock);
+       raw_spin_lock(&base->lock);
        base->clock_base[CLOCK_REALTIME].offset =
                timespec_to_ktime(realtime_offset);
 
-       hrtimer_force_reprogram(base);
-       spin_unlock(&base->lock);
+       hrtimer_force_reprogram(base, 0);
+       raw_spin_unlock(&base->lock);
 }
 
 /*
@@ -656,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 {
        if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
                if (wakeup) {
-                       spin_unlock(&base->cpu_base->lock);
+                       raw_spin_unlock(&base->cpu_base->lock);
                        raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-                       spin_lock(&base->cpu_base->lock);
+                       raw_spin_lock(&base->cpu_base->lock);
                } else
                        __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 
@@ -697,8 +735,6 @@ static int hrtimer_switch_to_hres(void)
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
-       printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
-              smp_processor_id());
        return 1;
 }
 
@@ -707,7 +743,8 @@ static int hrtimer_switch_to_hres(void)
 static inline int hrtimer_hres_active(void) { return 0; }
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
-static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
+static inline void
+hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                            struct hrtimer_clock_base *base,
                                            int wakeup)
@@ -719,17 +756,33 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
-#ifdef CONFIG_TIMER_STATS
-void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
+static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 {
+#ifdef CONFIG_TIMER_STATS
        if (timer->start_site)
                return;
-
-       timer->start_site = addr;
+       timer->start_site = __builtin_return_address(0);
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
+#endif
 }
+
+static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+       timer->start_site = NULL;
 #endif
+}
+
+static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+#ifdef CONFIG_TIMER_STATS
+       if (likely(!timer_stats_active))
+               return;
+       timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+                                timer->function, timer->start_comm, 0);
+#endif
+}
 
 /*
  * Counterpart to lock_hrtimer_base above:
@@ -737,7 +790,7 @@ void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
 static inline
 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
-       spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+       raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 }
 
 /**
@@ -797,7 +850,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
        struct hrtimer *entry;
        int leftmost = 1;
 
-       debug_hrtimer_activate(timer);
+       debug_activate(timer);
 
        /*
         * Find the right place in the rbtree:
@@ -850,19 +903,29 @@ static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
 {
-       if (timer->state & HRTIMER_STATE_ENQUEUED) {
-               /*
-                * Remove the timer from the rbtree and replace the
-                * first entry pointer if necessary.
-                */
-               if (base->first == &timer->node) {
-                       base->first = rb_next(&timer->node);
-                       /* Reprogram the clock event device. if enabled */
-                       if (reprogram && hrtimer_hres_active())
-                               hrtimer_force_reprogram(base->cpu_base);
+       if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+               goto out;
+
+       /*
+        * Remove the timer from the rbtree and replace the first
+        * entry pointer if necessary.
+        */
+       if (base->first == &timer->node) {
+               base->first = rb_next(&timer->node);
+#ifdef CONFIG_HIGH_RES_TIMERS
+               /* Reprogram the clock event device. if enabled */
+               if (reprogram && hrtimer_hres_active()) {
+                       ktime_t expires;
+
+                       expires = ktime_sub(hrtimer_get_expires(timer),
+                                           base->offset);
+                       if (base->cpu_base->expires_next.tv64 == expires.tv64)
+                               hrtimer_force_reprogram(base->cpu_base, 1);
                }
-               rb_erase(&timer->node, &base->active);
+#endif
        }
+       rb_erase(&timer->node, &base->active);
+out:
        timer->state = newstate;
 }
 
@@ -883,7 +946,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
                 * reprogramming happens in the interrupt handler. This is a
                 * rare case and less expensive than a smp call.
                 */
-               debug_hrtimer_deactivate(timer);
+               debug_deactivate(timer);
                timer_stats_hrtimer_clear_start_info(timer);
                reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
                __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
@@ -1060,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
        unsigned long flags;
        int i;
 
-       spin_lock_irqsave(&cpu_base->lock, flags);
+       raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
        if (!hrtimer_hres_active()) {
                for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1077,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
                }
        }
 
-       spin_unlock_irqrestore(&cpu_base->lock, flags);
+       raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
        if (mindelta.tv64 < 0)
                mindelta.tv64 = 0;
@@ -1116,7 +1179,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                  enum hrtimer_mode mode)
 {
-       debug_hrtimer_init(timer);
+       debug_init(timer, clock_id, mode);
        __hrtimer_init(timer, clock_id, mode);
 }
 EXPORT_SYMBOL_GPL(hrtimer_init);
@@ -1140,7 +1203,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
-static void __run_hrtimer(struct hrtimer *timer)
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 {
        struct hrtimer_clock_base *base = timer->base;
        struct hrtimer_cpu_base *cpu_base = base->cpu_base;
@@ -1149,7 +1212,7 @@ static void __run_hrtimer(struct hrtimer *timer)
 
        WARN_ON(!irqs_disabled());
 
-       debug_hrtimer_deactivate(timer);
+       debug_deactivate(timer);
        __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
        timer_stats_account_hrtimer(timer);
        fn = timer->function;
@@ -1159,9 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer)
         * they get migrated to another cpu, therefore its safe to unlock
         * the timer base.
         */
-       spin_unlock(&cpu_base->lock);
+       raw_spin_unlock(&cpu_base->lock);
+       trace_hrtimer_expire_entry(timer, now);
        restart = fn(timer);
-       spin_lock(&cpu_base->lock);
+       trace_hrtimer_expire_exit(timer);
+       raw_spin_lock(&cpu_base->lock);
 
        /*
         * Note: We clear the CALLBACK bit after enqueue_hrtimer and
@@ -1177,29 +1242,6 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
-static int force_clock_reprogram;
-
-/*
- * After 5 iteration's attempts, we consider that hrtimer_interrupt()
- * is hanging, which could happen with something that slows the interrupt
- * such as the tracing. Then we force the clock reprogramming for each future
- * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
- * threshold that we will overwrite.
- * The next tick event will be scheduled to 3 times we currently spend on
- * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
- * 1/4 of their time to process the hrtimer interrupts. This is enough to
- * let it running without serious starvation.
- */
-
-static inline void
-hrtimer_interrupt_hanging(struct clock_event_device *dev,
-                       ktime_t try_time)
-{
-       force_clock_reprogram = 1;
-       dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
-       printk(KERN_WARNING "hrtimer: interrupt too slow, "
-               "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
-}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1208,24 +1250,18 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 {
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base;
-       ktime_t expires_next, now;
-       int nr_retries = 0;
-       int i;
+       ktime_t expires_next, now, entry_time, delta;
+       int i, retries = 0;
 
        BUG_ON(!cpu_base->hres_active);
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;
 
- retry:
-       /* 5 retries is enough to notice a hang */
-       if (!(++nr_retries % 5))
-               hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
-
-       now = ktime_get();
-
+       entry_time = now = ktime_get();
+retry:
        expires_next.tv64 = KTIME_MAX;
 
-       spin_lock(&cpu_base->lock);
+       raw_spin_lock(&cpu_base->lock);
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
@@ -1271,7 +1307,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                                break;
                        }
 
-                       __run_hrtimer(timer);
+                       __run_hrtimer(timer, &basenow);
                }
                base++;
        }
@@ -1281,13 +1317,51 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         * against it.
         */
        cpu_base->expires_next = expires_next;
-       spin_unlock(&cpu_base->lock);
+       raw_spin_unlock(&cpu_base->lock);
 
        /* Reprogramming necessary ? */
-       if (expires_next.tv64 != KTIME_MAX) {
-               if (tick_program_event(expires_next, force_clock_reprogram))
-                       goto retry;
+       if (expires_next.tv64 == KTIME_MAX ||
+           !tick_program_event(expires_next, 0)) {
+               cpu_base->hang_detected = 0;
+               return;
        }
+
+       /*
+        * The next timer was already expired due to:
+        * - tracing
+        * - long lasting callbacks
+        * - being scheduled away when running in a VM
+        *
+        * We need to prevent that we loop forever in the hrtimer
+        * interrupt routine. We give it 3 attempts to avoid
+        * overreacting on some spurious event.
+        */
+       now = ktime_get();
+       cpu_base->nr_retries++;
+       if (++retries < 3)
+               goto retry;
+       /*
+        * Give the system a chance to do something else than looping
+        * here. We stored the entry time, so we know exactly how long
+        * we spent here. We schedule the next event this amount of
+        * time away.
+        */
+       cpu_base->nr_hangs++;
+       cpu_base->hang_detected = 1;
+       delta = ktime_sub(now, entry_time);
+       if (delta.tv64 > cpu_base->max_hang_time.tv64)
+               cpu_base->max_hang_time = delta;
+       /*
+        * Limit it to a sensible value as we enforce a longer
+        * delay. Give the CPU at least 100ms to catch up.
+        */
+       if (delta.tv64 > 100 * NSEC_PER_MSEC)
+               expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
+       else
+               expires_next = ktime_add(now, delta);
+       tick_program_event(expires_next, 1);
+       printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+                   ktime_to_ns(delta));
 }
 
 /*
@@ -1383,7 +1457,7 @@ void hrtimer_run_queues(void)
                        gettime = 0;
                }
 
-               spin_lock(&cpu_base->lock);
+               raw_spin_lock(&cpu_base->lock);
 
                while ((node = base->first)) {
                        struct hrtimer *timer;
@@ -1393,9 +1467,9 @@ void hrtimer_run_queues(void)
                                        hrtimer_get_expires_tv64(timer))
                                break;
 
-                       __run_hrtimer(timer);
+                       __run_hrtimer(timer, &base->softirq_time);
                }
-               spin_unlock(&cpu_base->lock);
+               raw_spin_unlock(&cpu_base->lock);
        }
 }
 
@@ -1420,6 +1494,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
        sl->timer.function = hrtimer_wakeup;
        sl->task = task;
 }
+EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
 {
@@ -1550,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;
 
-       spin_lock_init(&cpu_base->lock);
+       raw_spin_lock_init(&cpu_base->lock);
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1569,7 +1644,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
-               debug_hrtimer_deactivate(timer);
+               debug_deactivate(timer);
 
                /*
                 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
@@ -1608,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
         */
-       spin_lock(&new_base->lock);
-       spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+       raw_spin_lock(&new_base->lock);
+       raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                migrate_hrtimer_list(&old_base->clock_base[i],
                                     &new_base->clock_base[i]);
        }
 
-       spin_unlock(&old_base->lock);
-       spin_unlock(&new_base->lock);
+       raw_spin_unlock(&old_base->lock);
+       raw_spin_unlock(&new_base->lock);
 
        /* Check, if we got expired work to do */
        __hrtimer_peek_ahead_timers();
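
The hang-detection hunks above delay the next event by the time already spent in hrtimer_interrupt(), capped at 100ms, so a slow interrupt cannot starve the CPU. Below is a minimal userspace sketch of just that arithmetic (not kernel code): plain 64-bit nanosecond values stand in for ktime_t, and forced_expiry() is an illustrative helper name, not a function from this patch.

    /* Sketch of the bounded backoff applied once a hang is detected. */
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000LL

    static int64_t forced_expiry(int64_t entry_time_ns, int64_t now_ns)
    {
            /* Time spent handling expired timers in this interrupt. */
            int64_t delta = now_ns - entry_time_ns;

            /*
             * Push the next event out by the same amount of time, but
             * never by more than 100ms, mirroring the cap in the patch.
             */
            if (delta > 100 * NSEC_PER_MSEC)
                    return now_ns + 100 * NSEC_PER_MSEC;
            return now_ns + delta;
    }

    int main(void)
    {
            /* 2ms spent in the handler: next event programmed 2ms out. */
            printf("%lld ns\n",
                   (long long)(forced_expiry(0, 2 * NSEC_PER_MSEC) -
                               2 * NSEC_PER_MSEC));
            /* 250ms spent: delay is capped at 100ms past "now". */
            printf("%lld ns\n",
                   (long long)(forced_expiry(0, 250 * NSEC_PER_MSEC) -
                               250 * NSEC_PER_MSEC));
            return 0;
    }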