diff --git a/kernel/timer.c b/kernel/timer.c
index c61a7949387f93e69b1a4979eff1a03c7e7131b4..8cff36119e4d50f7336a06e8d01e59e8b162e694 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,8 +37,9 @@
 #include <linux/delay.h>
 #include <linux/tick.h>
 #include <linux/kallsyms.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -87,13 +88,6 @@ struct tvec_base boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
 static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
 
-/*
- * Note that all tvec_bases are 2 byte aligned and lower bit of
- * base in timer_list is guaranteed to be zero. Use the LSB for
- * the new flag to indicate whether the timer is deferrable
- */
-#define TBASE_DEFERRABLE_FLAG          (0x1)
-
 /* Functions below help us manage 'deferrable' flag */
 static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
 {
@@ -107,8 +101,7 @@ static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
 
 static inline void timer_set_deferrable(struct timer_list *timer)
 {
-       timer->base = ((struct tvec_base *)((unsigned long)(timer->base) |
-                                      TBASE_DEFERRABLE_FLAG));
+       timer->base = TBASE_MAKE_DEFERRED(timer->base);
 }
 
 static inline void
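
The TBASE_DEFERRABLE_FLAG definition removed above does not disappear: this change moves the flag handling into include/linux/timer.h, and timer_set_deferrable() now just applies a TBASE_MAKE_DEFERRED() helper. A minimal sketch of the pointer-tagging idea, assuming the helper keeps the same shape as the old open-coded version:

	/* tvec_bases are at least 2-byte aligned, so bit 0 of timer->base is
	 * always clear for a real pointer and can carry the deferrable flag. */
	#define TBASE_DEFERRABLE_FLAG	(0x1)
	#define TBASE_MAKE_DEFERRED(ptr) \
		((struct tvec_base *)((unsigned long)(ptr) | TBASE_DEFERRABLE_FLAG))
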
@@ -318,14 +311,24 @@ unsigned long round_jiffies_up_relative(unsigned long j)
 }
 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 
-
-static inline void set_running_timer(struct tvec_base *base,
-                                       struct timer_list *timer)
+/**
+ * set_timer_slack - set the allowed slack for a timer
+ * @timer: the timer to be modified
+ * @slack_hz: the amount of time (in jiffies) allowed for rounding
+ *
+ * Set the amount of time, in jiffies, that a certain timer has
+ * in terms of slack. By setting this value, the timer subsystem
+ * will schedule the actual timer somewhere between
+ * the time mod_timer() asks for, and that time plus the slack.
+ *
+ * By setting the slack to -1, a percentage of the delay is used
+ * instead.
+ */
+void set_timer_slack(struct timer_list *timer, int slack_hz)
 {
-#ifdef CONFIG_SMP
-       base->running_timer = timer;
-#endif
+       timer->slack = slack_hz;
 }
+EXPORT_SYMBOL_GPL(set_timer_slack);
 
 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
@@ -401,6 +404,11 @@ static void timer_stats_account_timer(struct timer_list *timer) {}
 
 static struct debug_obj_descr timer_debug_descr;
 
+static void *timer_debug_hint(void *addr)
+{
+       return ((struct timer_list *) addr)->function;
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
@@ -474,6 +482,7 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr timer_debug_descr = {
        .name           = "timer_list",
+       .debug_hint     = timer_debug_hint,
        .fixup_init     = timer_fixup_init,
        .fixup_activate = timer_fixup_activate,
        .fixup_free     = timer_fixup_free,
@@ -549,6 +558,7 @@ static void __init_timer(struct timer_list *timer,
 {
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
+       timer->slack = -1;
 #ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
@@ -557,6 +567,19 @@ static void __init_timer(struct timer_list *timer,
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
 
+void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
+                                        const char *name,
+                                        struct lock_class_key *key,
+                                        void (*function)(unsigned long),
+                                        unsigned long data)
+{
+       timer->function = function;
+       timer->data = data;
+       init_timer_on_stack_key(timer, name, key);
+       timer_set_deferrable(timer);
+}
+EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);
+
 /**
  * init_timer_key - initialize a timer
  * @timer: the timer to be initialized
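
setup_deferrable_timer_on_stack_key() combines on-stack timer initialization with the deferrable flag, so an idle CPU is not forced awake just to expire the timer. A hedged usage sketch, assuming a linux/timer.h wrapper macro setup_deferrable_timer_on_stack() fronts this function (my_timeout_fn is hypothetical):

	static void my_timeout_fn(unsigned long data)
	{
		/* timeout handling; may run late on an idle CPU */
	}

	static void wait_with_deferrable_timeout(unsigned long timeout)
	{
		struct timer_list t;

		setup_deferrable_timer_on_stack(&t, my_timeout_fn, 0);
		mod_timer(&t, jiffies + timeout);
		/* ... wait for the event ... */
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
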
@@ -659,12 +682,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
        cpu = smp_processor_id();
 
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
-       if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
-               int preferred_cpu = get_nohz_load_balancer();
-
-               if (preferred_cpu >= 0)
-                       cpu = preferred_cpu;
-       }
+       if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
+               cpu = get_nohz_timer_target();
 #endif
        new_base = per_cpu(tvec_bases, cpu);
 
@@ -714,6 +733,45 @@ int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
+/*
+ * Decide where to put the timer while taking the slack into account
+ *
+ * Algorithm:
+ *   1) calculate the maximum (absolute) time
+ *   2) calculate the highest bit where the expires and new max are different
+ *   3) use this bit to make a mask
+ *   4) use the bitmask to round down the maximum time, so that all last
+ *      bits are zeros
+ */
+static inline
+unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
+{
+       unsigned long expires_limit, mask;
+       int bit;
+
+       if (timer->slack >= 0) {
+               expires_limit = expires + timer->slack;
+       } else {
+               long delta = expires - jiffies;
+
+               if (delta < 256)
+                       return expires;
+
+               expires_limit = expires + delta / 256;
+       }
+       mask = expires ^ expires_limit;
+       if (mask == 0)
+               return expires;
+
+       bit = find_last_bit(&mask, BITS_PER_LONG);
+
+       mask = (1 << bit) - 1;
+
+       expires_limit = expires_limit & ~(mask);
+
+       return expires_limit;
+}
+
 /**
  * mod_timer - modify a timer's timeout
  * @timer: the timer to be modified
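
A worked example of the rounding: with slack == -1 and an expiry 1000 jiffies out at expires = 10000 (0x2710), expires_limit becomes 10003 (0x2713), the XOR mask is 0x3, the highest differing bit is bit 1, so the expiry rounds to 10002. A standalone user-space sketch of the same arithmetic (find_last_bit() on a single word is modelled with __builtin_clzl here):

	#include <stdio.h>

	static unsigned long slack_round(unsigned long now, unsigned long expires)
	{
		long delta = expires - now;
		unsigned long limit, mask;
		int bit;

		if (delta < 256)
			return expires;
		limit = expires + delta / 256;
		mask = expires ^ limit;
		if (!mask)
			return expires;
		/* index of the highest set bit, as find_last_bit() would return */
		bit = 8 * sizeof(mask) - __builtin_clzl(mask) - 1;
		return limit & ~((1UL << bit) - 1);
	}

	int main(void)
	{
		printf("%lu\n", slack_round(9000, 10000));	/* prints 10002 */
		return 0;
	}
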
@@ -736,6 +794,8 @@ EXPORT_SYMBOL(mod_timer_pending);
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
+       expires = apply_slack(timer, expires);
+
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
@@ -859,15 +919,12 @@ int del_timer(struct timer_list *timer)
 }
 EXPORT_SYMBOL(del_timer);
 
-#ifdef CONFIG_SMP
 /**
  * try_to_del_timer_sync - Try to deactivate a timer
  * @timer: timer to deactivate
  *
  * This function tries to deactivate a timer. Upon successful (ret >= 0)
  * exit the timer is not queued and the handler is not running on any CPU.
- *
- * It must not be called from interrupt contexts.
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
@@ -880,6 +937,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
        if (base->running_timer == timer)
                goto out;
 
+       timer_stats_timer_clear_start_info(timer);
        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
@@ -895,6 +953,7 @@ out:
 }
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
+#ifdef CONFIG_SMP
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -910,6 +969,25 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  * add_timer_on(). Upon exit the timer is not queued and the handler is
  * not running on any CPU.
  *
+ * Note: You must not hold locks that are held in interrupt context
+ *   while calling this function. Even if the lock has nothing to do
+ *   with the timer in question.  Here's why:
+ *
+ *    CPU0                             CPU1
+ *    ----                             ----
+ *                                   <SOFTIRQ>
+ *                                   call_timer_fn();
+ *                                     base->running_timer = mytimer;
+ *  spin_lock_irq(somelock);
+ *                                     <IRQ>
+ *                                        spin_lock(somelock);
+ *  del_timer_sync(mytimer);
+ *   while (base->running_timer == mytimer);
+ *
+ * Now del_timer_sync() will never return and never release somelock.
+ * The interrupt on the other CPU is waiting to grab somelock but
+ * it has interrupted the softirq that CPU0 is waiting to finish.
+ *
  * The function returns whether it has deactivated a pending timer or not.
  */
 int del_timer_sync(struct timer_list *timer)
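
A minimal sketch of the pattern that comment forbids (mydev_lock, the IRQ handler and the shutdown path are hypothetical): the same lock is taken from hard-IRQ context and is held around del_timer_sync(), which is exactly the CPU0/CPU1 interleaving shown above.

	static DEFINE_SPINLOCK(mydev_lock);
	static struct timer_list mytimer;

	static irqreturn_t mydev_irq(int irq, void *dev_id)
	{
		spin_lock(&mydev_lock);		/* lock used in hard-IRQ context */
		/* ... */
		spin_unlock(&mydev_lock);
		return IRQ_HANDLED;
	}

	static void mydev_shutdown(void)
	{
		spin_lock_irq(&mydev_lock);	/* BAD: held while waiting below */
		del_timer_sync(&mytimer);	/* can spin forever as described */
		spin_unlock_irq(&mydev_lock);
	}
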
@@ -917,12 +995,20 @@ int del_timer_sync(struct timer_list *timer)
 #ifdef CONFIG_LOCKDEP
        unsigned long flags;
 
+       /*
+        * If lockdep gives a backtrace here, please reference
+        * the synchronization rules above.
+        */
        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
 #endif
-
+       /*
+        * don't use it in hardirq context, because it
+        * could lead to deadlock.
+        */
+       WARN_ON(in_irq());
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
@@ -953,6 +1039,47 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index)
        return index;
 }
 
+static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
+                         unsigned long data)
+{
+       int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+       /*
+        * It is permissible to free the timer from inside the
+        * function that is called from it, this we need to take into
+        * account for lockdep too. To avoid bogus "held lock freed"
+        * warnings as well as problems when looking into
+        * timer->lockdep_map, make a copy and use that here.
+        */
+       struct lockdep_map lockdep_map = timer->lockdep_map;
+#endif
+       /*
+        * Couple the lock chain with the lock chain at
+        * del_timer_sync() by acquiring the lock_map around the fn()
+        * call here and in del_timer_sync().
+        */
+       lock_map_acquire(&lockdep_map);
+
+       trace_timer_expire_entry(timer);
+       fn(data);
+       trace_timer_expire_exit(timer);
+
+       lock_map_release(&lockdep_map);
+
+       if (preempt_count != preempt_count()) {
+               WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
+                         fn, preempt_count, preempt_count());
+               /*
+                * Restore the preempt count. That gives us a decent
+                * chance to survive and extract information. If the
+                * callback kept a lock held, bad luck, but not worse
+                * than the BUG() we had.
+                */
+               preempt_count() = preempt_count;
+       }
+}
+
 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
 
 /**
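
The preempt_count check replaces the old unconditional BUG() in __run_timers() (removed below) with a WARN_ONCE() plus a repair of the count. A hypothetical callback that would trip it: a handler that returns with a spinlock still held leaves preempt_count() one higher after fn(data) than before it.

	static DEFINE_SPINLOCK(leaky_lock);

	/* Hypothetical buggy handler: the missing unlock leaks one level of
	 * preempt_count, which call_timer_fn() now warns about and resets. */
	static void leaky_timer_fn(unsigned long data)
	{
		spin_lock(&leaky_lock);
		/* ... forgot spin_unlock(&leaky_lock) ... */
	}
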
@@ -992,53 +1119,15 @@ static inline void __run_timers(struct tvec_base *base)
 
                        timer_stats_account_timer(timer);
 
-                       set_running_timer(base, timer);
+                       base->running_timer = timer;
                        detach_timer(timer, 1);
 
                        spin_unlock_irq(&base->lock);
-                       {
-                               int preempt_count = preempt_count();
-
-#ifdef CONFIG_LOCKDEP
-                               /*
-                                * It is permissible to free the timer from
-                                * inside the function that is called from
-                                * it, this we need to take into account for
-                                * lockdep too. To avoid bogus "held lock
-                                * freed" warnings as well as problems when
-                                * looking into timer->lockdep_map, make a
-                                * copy and use that here.
-                                */
-                               struct lockdep_map lockdep_map =
-                                       timer->lockdep_map;
-#endif
-                               /*
-                                * Couple the lock chain with the lock chain at
-                                * del_timer_sync() by acquiring the lock_map
-                                * around the fn() call here and in
-                                * del_timer_sync().
-                                */
-                               lock_map_acquire(&lockdep_map);
-
-                               trace_timer_expire_entry(timer);
-                               fn(data);
-                               trace_timer_expire_exit(timer);
-
-                               lock_map_release(&lockdep_map);
-
-                               if (preempt_count != preempt_count()) {
-                                       printk(KERN_ERR "huh, entered %p "
-                                              "with preempt_count %08x, exited"
-                                              " with %08x?\n",
-                                              fn, preempt_count,
-                                              preempt_count());
-                                       BUG();
-                               }
-                       }
+                       call_timer_fn(timer, fn, data);
                        spin_lock_irq(&base->lock);
                }
        }
-       set_running_timer(base, NULL);
+       base->running_timer = NULL;
        spin_unlock_irq(&base->lock);
 }
 
@@ -1168,9 +1257,15 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now,
  */
 unsigned long get_next_timer_interrupt(unsigned long now)
 {
-       struct tvec_base *base = __get_cpu_var(tvec_bases);
+       struct tvec_base *base = __this_cpu_read(tvec_bases);
        unsigned long expires;
 
+       /*
+        * Pretend that there is no timer pending if the cpu is offline.
+        * Possible pending timers will be migrated later to an active cpu.
+        */
+       if (cpu_is_offline(smp_processor_id()))
+               return now + NEXT_TIMER_MAX_DELTA;
        spin_lock(&base->lock);
        if (time_before_eq(base->next_timer, base->timer_jiffies))
                base->next_timer = __next_timer_interrupt(base);
@@ -1198,7 +1293,10 @@ void update_process_times(int user_tick)
        run_local_timers();
        rcu_check_callbacks(cpu, user_tick);
        printk_tick();
-       perf_event_do_pending();
+#ifdef CONFIG_IRQ_WORK
+       if (in_irq())
+               irq_work_run();
+#endif
        scheduler_tick();
        run_posix_cpu_timers(p);
 }
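
perf_event_do_pending() gives way to the generic irq_work hook: anything that needs to defer work out of NMI or deep IRQ context queues an irq_work, and the tick (when in_irq()) runs the queue. A minimal sketch, assuming the 2.6.37-era irq_work API of init_irq_work()/irq_work_queue() (my_work and my_deferred_fn are illustrative names):

	#include <linux/irq_work.h>

	static struct irq_work my_work;

	static void my_deferred_fn(struct irq_work *work)
	{
		/* runs from irq_work_run() in IRQ context; taking locks is fine here */
	}

	static int __init my_setup(void)
	{
		init_irq_work(&my_work, my_deferred_fn);
		return 0;
	}

	/* called from NMI or similarly restricted context */
	static void my_nmi_path(void)
	{
		irq_work_queue(&my_work);
	}
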
@@ -1208,7 +1306,7 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-       struct tvec_base *base = __get_cpu_var(tvec_bases);
+       struct tvec_base *base = __this_cpu_read(tvec_bases);
 
        hrtimer_run_pending();
 
@@ -1223,20 +1321,6 @@ void run_local_timers(void)
 {
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
-       softlockup_tick();
-}
-
-/*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
- */
-
-void do_timer(unsigned long ticks)
-{
-       jiffies_64 += ticks;
-       update_wall_time();
-       calc_global_load();
 }
 
 #ifdef __ARCH_WANT_SYS_ALARM
@@ -1618,11 +1702,14 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
+       int err;
+
        switch(action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               if (init_timers_cpu(cpu) < 0)
-                       return NOTIFY_BAD;
+               err = init_timers_cpu(cpu);
+               if (err < 0)
+                       return notifier_from_errno(err);
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
@@ -1648,7 +1735,7 @@ void __init init_timers(void)
 
        init_timer_stats();
 
-       BUG_ON(err == NOTIFY_BAD);
+       BUG_ON(err != NOTIFY_OK);
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
@@ -1681,3 +1768,25 @@ unsigned long msleep_interruptible(unsigned int msecs)
 }
 
 EXPORT_SYMBOL(msleep_interruptible);
+
+static int __sched do_usleep_range(unsigned long min, unsigned long max)
+{
+       ktime_t kmin;
+       unsigned long delta;
+
+       kmin = ktime_set(0, min * NSEC_PER_USEC);
+       delta = (max - min) * NSEC_PER_USEC;
+       return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
+}
+
+/**
+ * usleep_range - Drop-in replacement for udelay where wakeup is flexible
+ * @min: Minimum time in usecs to sleep
+ * @max: Maximum time in usecs to sleep
+ */
+void usleep_range(unsigned long min, unsigned long max)
+{
+       __set_current_state(TASK_UNINTERRUPTIBLE);
+       do_usleep_range(min, max);
+}
+EXPORT_SYMBOL(usleep_range);
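
usleep_range() gives the scheduler a window rather than a point, so short sleeps can piggy-back on other wakeups instead of burning CPU in udelay() or over-sleeping with msleep(). A hypothetical register-poll loop that fits the API:

	/* Poll a hypothetical READY bit; 100-250 us per try is too long to
	 * busy-wait with udelay() and too short for msleep()'s resolution. */
	static int wait_for_ready(void __iomem *status_reg)
	{
		int tries;

		for (tries = 0; tries < 100; tries++) {
			if (readl(status_reg) & 0x1)
				return 0;
			usleep_range(100, 250);
		}
		return -ETIMEDOUT;
	}
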