perf_events, x86: Implement intel core solo/duo support
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f74d2d7..5feaddc 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -49,7 +49,7 @@
 #include "lockdep_internals.h"
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/lockdep.h>
+#include <trace/events/lock.h>
 
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
@@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
-       __raw_spin_lock(&lockdep_lock);
+       arch_spin_lock(&lockdep_lock);
        /*
         * Make sure that if another CPU detected a bug while
         * walking the graph we dont change it (while the other
@@ -85,7 +85,7 @@ static int graph_lock(void)
         * dropped already)
         */
        if (!debug_locks) {
-               __raw_spin_unlock(&lockdep_lock);
+               arch_spin_unlock(&lockdep_lock);
                return 0;
        }
        /* prevent any recursions within lockdep from causing deadlocks */
@@ -95,11 +95,11 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-       if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+       if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
                return DEBUG_LOCKS_WARN_ON(1);
 
        current->lockdep_recursion--;
-       __raw_spin_unlock(&lockdep_lock);
+       arch_spin_unlock(&lockdep_lock);
        return 0;
 }
 
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
 {
        int ret = debug_locks_off();
 
-       __raw_spin_unlock(&lockdep_lock);
+       arch_spin_unlock(&lockdep_lock);
 
        return ret;
 }
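
The three hunks above are part of the raw_spinlock_t -> arch_spinlock_t rename: lockdep's internal graph lock has to sit below the spinlock-debugging layer, so it takes the arch-level lock directly and its own acquisitions never recurse back into lockdep. A condensed sketch of the resulting lock/unlock pair, assembled from the hunks above (error handling abbreviated):

static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
        arch_spin_lock(&lockdep_lock);          /* arch-level: invisible to lockdep */
        if (!debug_locks) {                     /* another CPU already flagged a bug */
                arch_spin_unlock(&lockdep_lock);
                return 0;
        }
        /* prevent any recursions within lockdep from causing deadlocks */
        current->lockdep_recursion++;
        return 1;
}

static inline int graph_unlock(void)
{
        if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
                return DEBUG_LOCKS_WARN_ON(1);

        current->lockdep_recursion--;
        arch_spin_unlock(&lockdep_lock);
        return 0;
}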
@@ -140,7 +140,13 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 }
 
 #ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+                     cpu_lock_stats);
+
+static inline u64 lockstat_clock(void)
+{
+       return cpu_clock(smp_processor_id());
+}
 
 static int lock_point(unsigned long points[], unsigned long ip)
 {
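
The new lockstat_clock() helper gives the lock statistics a single timestamp source: cpu_clock(smp_processor_id()) returns an unsigned per-CPU nanosecond timestamp, replacing the bare sched_clock() calls further down (and motivating the s64 -> u64 changes in later hunks). A minimal usage sketch, with the stats field name taken as illustrative:

        u64 t0, t1;

        t0 = lockstat_clock();                  /* stamp at acquire time */
        /* ... critical section ... */
        t1 = lockstat_clock();
        lock_time_inc(&stats->write_holdtime, t1 - t0);  /* record the delta */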
@@ -158,12 +164,12 @@ static int lock_point(unsigned long points[], unsigned long ip)
        return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
        if (time > lt->max)
                lt->max = time;
 
-       if (time < lt->min || !lt->min)
+       if (time < lt->min || !lt->nr)
                lt->min = time;
 
        lt->total += time;
@@ -172,8 +178,15 @@ static void lock_time_inc(struct lock_time *lt, s64 time)
 
 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 {
-       dst->min += src->min;
-       dst->max += src->max;
+       if (!src->nr)
+               return;
+
+       if (src->max > dst->max)
+               dst->max = src->max;
+
+       if (src->min < dst->min || !dst->nr)
+               dst->min = src->min;
+
        dst->total += src->total;
        dst->nr += src->nr;
 }
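
This hunk fixes two statistics bugs. lock_time_inc() used !lt->min as its "no samples yet" test, which misfires once a genuine zero-length time has been recorded, so it now tests !lt->nr. And lock_time_add(), which folds per-CPU figures into per-class totals, summed the minima and maxima instead of taking the true minimum and maximum. A standalone demo of the corrected fold (struct simplified from the kernel's struct lock_time; plain C, compilable in userspace):

#include <stdio.h>

struct lock_time { unsigned long long min, max, total, nr; };

static void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
        if (!src->nr)                           /* this CPU recorded nothing */
                return;

        if (src->max > dst->max)
                dst->max = src->max;

        if (src->min < dst->min || !dst->nr)    /* nr == 0 means "no samples yet" */
                dst->min = src->min;

        dst->total += src->total;
        dst->nr += src->nr;
}

int main(void)
{
        struct lock_time cpu0 = { 2, 10, 12, 2 }, cpu1 = { 5, 7, 12, 2 };
        struct lock_time sum  = { 0, 0, 0, 0 };

        lock_time_add(&cpu0, &sum);
        lock_time_add(&cpu1, &sum);
        /* prints min=2 max=10 nr=4; the old code would have summed to min=7 max=17 */
        printf("min=%llu max=%llu nr=%llu\n", sum.min, sum.max, sum.nr);
        return 0;
}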
@@ -186,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
        memset(&stats, 0, sizeof(struct lock_class_stats));
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *pcs =
-                       &per_cpu(lock_stats, cpu)[class - lock_classes];
+                       &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];
@@ -213,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)
 
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *cpu_stats =
-                       &per_cpu(lock_stats, cpu)[class - lock_classes];
+                       &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
@@ -223,23 +236,23 @@ void clear_lock_stats(struct lock_class *class)
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
-       return &get_cpu_var(lock_stats)[class - lock_classes];
+       return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 }
 
 static void put_lock_stats(struct lock_class_stats *stats)
 {
-       put_cpu_var(lock_stats);
+       put_cpu_var(cpu_lock_stats);
 }
 
 static void lock_release_holdtime(struct held_lock *hlock)
 {
        struct lock_class_stats *stats;
-       s64 holdtime;
+       u64 holdtime;
 
        if (!lock_stat)
                return;
 
-       holdtime = sched_clock() - hlock->holdtime_stamp;
+       holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
        stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
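
get_lock_stats()/put_lock_stats() bracket a preemption-disabled window around the per-CPU statistics (get_cpu_var() disables preemption, put_cpu_var() re-enables it); the per-CPU array itself is renamed from lock_stats to cpu_lock_stats so it no longer shares a name with the lock_stats() function above. The typical call pattern, sketched after lock_release_holdtime() (field names quoted from the kernel of this era, so treat as illustrative):

        struct lock_class_stats *stats;
        u64 holdtime = lockstat_clock() - hlock->holdtime_stamp;

        stats = get_lock_stats(hlock_class(hlock));     /* disables preemption */
        if (hlock->read)
                lock_time_inc(&stats->read_holdtime, holdtime);
        else
                lock_time_inc(&stats->write_holdtime, holdtime);
        put_lock_stats(stats);                          /* re-enables preemption */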
@@ -374,7 +387,8 @@ static int save_trace(struct stack_trace *trace)
         * complete trace that maxes out the entries provided will be reported
         * as incomplete, friggin useless </rant>
         */
-       if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+       if (trace->nr_entries != 0 &&
+           trace->entries[trace->nr_entries-1] == ULONG_MAX)
                trace->nr_entries--;
 
        trace->max_entries = trace->nr_entries;
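
The added nr_entries check closes an out-of-bounds read: save_stack_trace() terminates a trace that has spare room with a ULONG_MAX end marker, which save_trace() strips, but with an empty trace the old test indexed one element before the array:

        /* old check, with trace->nr_entries == 0:
         *
         *      trace->entries[trace->nr_entries - 1]  ==>  trace->entries[-1]
         *
         * an out-of-bounds read; the new nr_entries != 0 test
         * short-circuits before the access. */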
@@ -578,6 +592,9 @@ static int static_obj(void *obj)
        if ((addr >= start) && (addr < end))
                return 1;
 
+       if (arch_is_kernel_data(addr))
+               return 1;
+
 #ifdef CONFIG_SMP
        /*
         * percpu var?
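
static_obj() decides whether a lock object lives in static kernel storage, in which case it needs no explicitly registered lock class key. The new arch_is_kernel_data() hook lets architectures report statically allocated data that falls outside the generic start..end window checked just above; the generic fallback (as in asm-generic/sections.h of this era, quoted from memory, so treat as a sketch) simply returns 0:

#ifndef arch_is_kernel_data
static inline int arch_is_kernel_data(unsigned long addr)
{
        return 0;       /* no extra arch-specific data ranges by default */
}
#endif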
@@ -1153,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
        this.class = class;
 
        local_irq_save(flags);
-       __raw_spin_lock(&lockdep_lock);
+       arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_forward_deps(&this);
-       __raw_spin_unlock(&lockdep_lock);
+       arch_spin_unlock(&lockdep_lock);
        local_irq_restore(flags);
 
        return ret;
@@ -1180,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
        this.class = class;
 
        local_irq_save(flags);
-       __raw_spin_lock(&lockdep_lock);
+       arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_backward_deps(&this);
-       __raw_spin_unlock(&lockdep_lock);
+       arch_spin_unlock(&lockdep_lock);
        local_irq_restore(flags);
 
        return ret;
@@ -2789,7 +2806,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
        hlock->waittime_stamp = 0;
-       hlock->holdtime_stamp = sched_clock();
+       hlock->holdtime_stamp = lockstat_clock();
 #endif
 
        if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3319,7 +3336,7 @@ found_it:
        if (hlock->instance != lock)
                return;
 
-       hlock->waittime_stamp = sched_clock();
+       hlock->waittime_stamp = lockstat_clock();
 
        contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
        contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3342,8 +3359,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
-       u64 now;
-       s64 waittime = 0;
+       u64 now, waittime = 0;
        int i, cpu;
 
        depth = curr->lockdep_depth;
@@ -3371,7 +3387,7 @@ found_it:
 
        cpu = smp_processor_id();
        if (hlock->waittime_stamp) {
-               now = sched_clock();
+               now = lockstat_clock();
                waittime = now - hlock->waittime_stamp;
                hlock->holdtime_stamp = now;
        }
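
Taken together, the final hunks move all contention timing onto the new clock: a contended lock is stamped in __lock_contended(), and once it is finally taken, __lock_acquired() converts that stamp into a wait time and restarts the hold timer. A timeline sketch, using the names from the hunks above:

        /* __lock_contended(): the task starts waiting */
        hlock->waittime_stamp = lockstat_clock();

        /* ... the task spins or blocks until the lock is granted ... */

        /* __lock_acquired(): the wait ends, the hold begins */
        now      = lockstat_clock();
        waittime = now - hlock->waittime_stamp;         /* time spent contending */
        hlock->holdtime_stamp = now;                    /* hold time restarts here */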