diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 11832ac..0c30d04 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -25,6 +25,7 @@
  * Thanks to Arjan van de Ven for coming up with the initial idea of
  * mapping lock dependencies runtime.
  */
+#define DISABLE_BRANCH_PROFILING
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
 #include <linux/ftrace.h>
+#include <linux/stringify.h>
+#include <linux/bitops.h>
 
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/lock.h>
+
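
CREATE_TRACE_POINTS is the usual tracepoint-instantiation idiom: it must be defined in exactly one translation unit before the trace header is included, so that this one include emits the tracepoint definitions while every other includer only sees declarations. A minimal sketch of the pattern (file names illustrative):

	/* lockdep.c - the single file that instantiates the events: */
	#define CREATE_TRACE_POINTS
	#include <trace/events/lock.h>

	/* elsewhere.c - all other users get declarations only: */
	#include <trace/events/lock.h>
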
 #ifdef CONFIG_PROVE_LOCKING
 int prove_locking = 1;
 module_param(prove_locking, int, 0644);
@@ -67,11 +73,11 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really don't want the spinlock
  * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 static int graph_lock(void)
 {
-       __raw_spin_lock(&lockdep_lock);
+       arch_spin_lock(&lockdep_lock);
        /*
         * Make sure that if another CPU detected a bug while
         * walking the graph we don't change it (while the other
@@ -79,7 +85,7 @@ static int graph_lock(void)
         * dropped already)
         */
        if (!debug_locks) {
-               __raw_spin_unlock(&lockdep_lock);
+               arch_spin_unlock(&lockdep_lock);
                return 0;
        }
        /* prevent any recursions within lockdep from causing deadlocks */
@@ -89,11 +95,11 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
-       if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+       if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
                return DEBUG_LOCKS_WARN_ON(1);
 
        current->lockdep_recursion--;
-       __raw_spin_unlock(&lockdep_lock);
+       arch_spin_unlock(&lockdep_lock);
        return 0;
 }
 
@@ -105,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
 {
        int ret = debug_locks_off();
 
-       __raw_spin_unlock(&lockdep_lock);
+       arch_spin_unlock(&lockdep_lock);
 
        return ret;
 }
@@ -134,30 +140,36 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 }
 
 #ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+                     cpu_lock_stats);
 
-static int lock_contention_point(struct lock_class *class, unsigned long ip)
+static inline u64 lockstat_clock(void)
+{
+       return cpu_clock(smp_processor_id());
+}
+
+static int lock_point(unsigned long points[], unsigned long ip)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
-               if (class->contention_point[i] == 0) {
-                       class->contention_point[i] = ip;
+       for (i = 0; i < LOCKSTAT_POINTS; i++) {
+               if (points[i] == 0) {
+                       points[i] = ip;
                        break;
                }
-               if (class->contention_point[i] == ip)
+               if (points[i] == ip)
                        break;
        }
 
        return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
        if (time > lt->max)
                lt->max = time;
 
-       if (time < lt->min || !lt->min)
+       if (time < lt->min || !lt->nr)
                lt->min = time;
 
        lt->total += time;
@@ -166,8 +178,15 @@ static void lock_time_inc(struct lock_time *lt, s64 time)
 
 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
 {
-       dst->min += src->min;
-       dst->max += src->max;
+       if (!src->nr)
+               return;
+
+       if (src->max > dst->max)
+               dst->max = src->max;
+
+       if (src->min < dst->min || !dst->nr)
+               dst->min = src->min;
+
        dst->total += src->total;
        dst->nr += src->nr;
 }
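
The old lock_time_add() summed the per-cpu minima and maxima, so two cpus with wait-time ranges [3, 10] and [5, 20] were reported as [8, 30]; the rewrite merges them as a true min/max, and lock_time_inc() now keys its "first sample" test off lt->nr so that a legitimate minimum of 0 is not clobbered by later samples. A standalone sketch of the merge semantics (hypothetical numbers):

	#include <assert.h>

	struct lock_time { long long min, max, total; unsigned long nr; };

	static void merge(struct lock_time *src, struct lock_time *dst)
	{
		if (!src->nr)				/* nothing recorded */
			return;
		if (src->max > dst->max)
			dst->max = src->max;		/* true max, not a sum */
		if (src->min < dst->min || !dst->nr)	/* !nr: first merge */
			dst->min = src->min;
		dst->total += src->total;
		dst->nr += src->nr;
	}

	int main(void)
	{
		struct lock_time cpu0 = { 3, 10, 13, 2 }, cpu1 = { 5, 20, 25, 2 };
		struct lock_time sum  = { 0, 0, 0, 0 };

		merge(&cpu0, &sum);
		merge(&cpu1, &sum);
		assert(sum.min == 3 && sum.max == 20);	/* old code: 8 and 30 */
		return 0;
	}
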
@@ -180,11 +199,14 @@ struct lock_class_stats lock_stats(struct lock_class *class)
        memset(&stats, 0, sizeof(struct lock_class_stats));
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *pcs =
-                       &per_cpu(lock_stats, cpu)[class - lock_classes];
+                       &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];
 
+               for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
+                       stats.contending_point[i] += pcs->contending_point[i];
+
                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
                lock_time_add(&pcs->write_waittime, &stats.write_waittime);
 
@@ -204,32 +226,33 @@ void clear_lock_stats(struct lock_class *class)
 
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *cpu_stats =
-                       &per_cpu(lock_stats, cpu)[class - lock_classes];
+                       &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
        memset(class->contention_point, 0, sizeof(class->contention_point));
+       memset(class->contending_point, 0, sizeof(class->contending_point));
 }
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
-       return &get_cpu_var(lock_stats)[class - lock_classes];
+       return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 }
 
 static void put_lock_stats(struct lock_class_stats *stats)
 {
-       put_cpu_var(lock_stats);
+       put_cpu_var(cpu_lock_stats);
 }
 
 static void lock_release_holdtime(struct held_lock *hlock)
 {
        struct lock_class_stats *stats;
-       s64 holdtime;
+       u64 holdtime;
 
        if (!lock_stat)
                return;
 
-       holdtime = sched_clock() - hlock->holdtime_stamp;
+       holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
        stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
@@ -287,14 +310,12 @@ void lockdep_off(void)
 {
        current->lockdep_recursion++;
 }
-
 EXPORT_SYMBOL(lockdep_off);
 
 void lockdep_on(void)
 {
        current->lockdep_recursion--;
 }
-
 EXPORT_SYMBOL(lockdep_on);
 
 /*
@@ -307,12 +328,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE       1
 # define SOFTIRQ_VERBOSE       1
+# define RECLAIM_VERBOSE       1
 #else
 # define HARDIRQ_VERBOSE       0
 # define SOFTIRQ_VERBOSE       0
+# define RECLAIM_VERBOSE       0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -357,11 +380,22 @@ static int save_trace(struct stack_trace *trace)
 
        save_stack_trace(trace);
 
+       /*
+        * Some daft arches put -1 at the end to indicate it's a full trace.
+        *
+        * <rant> this is buggy anyway, since it takes a whole extra entry so a
+        * complete trace that maxes out the entries provided will be reported
+        * as incomplete, friggin useless </rant>
+        */
+       if (trace->nr_entries != 0 &&
+           trace->entries[trace->nr_entries-1] == ULONG_MAX)
+               trace->nr_entries--;
+
        trace->max_entries = trace->nr_entries;
 
        nr_stack_trace_entries += trace->nr_entries;
 
-       if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
+       if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
                if (!debug_locks_off_graph_unlock())
                        return 0;
 
@@ -379,20 +413,6 @@ unsigned int nr_hardirq_chains;
 unsigned int nr_softirq_chains;
 unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
-unsigned int max_recursion_depth;
-
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
-                                    unsigned int depth)
-{
-       if (!depth)
-               lockdep_dependency_gen_id++;
-       if (source->dep_gen_id == lockdep_dependency_gen_id)
-               return true;
-       source->dep_gen_id = lockdep_dependency_gen_id;
-       return false;
-}
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
@@ -422,35 +442,26 @@ atomic_t redundant_softirqs_on;
 atomic_t redundant_softirqs_off;
 atomic_t nr_unused_locks;
 atomic_t nr_cyclic_checks;
-atomic_t nr_cyclic_check_recursions;
 atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_forwards_recursions;
 atomic_t nr_find_usage_backwards_checks;
-atomic_t nr_find_usage_backwards_recursions;
-# define debug_atomic_inc(ptr)         atomic_inc(ptr)
-# define debug_atomic_dec(ptr)         atomic_dec(ptr)
-# define debug_atomic_read(ptr)                atomic_read(ptr)
-#else
-# define debug_atomic_inc(ptr)         do { } while (0)
-# define debug_atomic_dec(ptr)         do { } while (0)
-# define debug_atomic_read(ptr)                0
 #endif
 
 /*
  * Locking printouts:
  */
 
+#define __USAGE(__STATE)                                               \
+       [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
+       [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
+       [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
+       [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
+
 static const char *usage_str[] =
 {
-       [LOCK_USED] =                   "initial-use ",
-       [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
-       [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
-       [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
-       [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
-       [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
-       [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
-       [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
-       [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
+#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+       [LOCK_USED] = "INITIAL USE",
 };
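
With the states listed in lockdep_states.h at this point (HARDIRQ, SOFTIRQ and RECLAIM_FS), the __USAGE() x-macro expands the table to entries of this shape (illustrative expansion, not part of the patch):

	[LOCK_USED_IN_HARDIRQ]		= "IN-HARDIRQ-W",
	[LOCK_ENABLED_HARDIRQ]		= "HARDIRQ-ON-W",
	[LOCK_USED_IN_HARDIRQ_READ]	= "IN-HARDIRQ-R",
	[LOCK_ENABLED_HARDIRQ_READ]	= "HARDIRQ-ON-R",
	/* ... the same four entries again for SOFTIRQ and RECLAIM_FS ... */
	[LOCK_USED] = "INITIAL USE",
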
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -458,46 +469,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 }
 
-void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+static inline unsigned long lock_flag(enum lock_usage_bit bit)
 {
-       *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
-
-       if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
-               *c1 = '+';
-       else
-               if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
-                       *c1 = '-';
+       return 1UL << bit;
+}
 
-       if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
-               *c2 = '+';
-       else
-               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
-                       *c2 = '-';
+static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
+{
+       char c = '.';
 
-       if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-               *c3 = '-';
-       if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
-               *c3 = '+';
-               if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-                       *c3 = '?';
+       if (class->usage_mask & lock_flag(bit + 2))
+               c = '+';
+       if (class->usage_mask & lock_flag(bit)) {
+               c = '-';
+               if (class->usage_mask & lock_flag(bit + 2))
+                       c = '?';
        }
 
-       if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-               *c4 = '-';
-       if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
-               *c4 = '+';
-               if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-                       *c4 = '?';
-       }
+       return c;
+}
+
+void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
+{
+       int i = 0;
+
+#define LOCKDEP_STATE(__STATE)                                                 \
+       usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
+       usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+
+       usage[i] = '\0';
 }
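
For the same three states the loop above expands to six characters plus a terminating NUL (illustrative):

	usage[0] = get_usage_char(class, LOCK_USED_IN_HARDIRQ);
	usage[1] = get_usage_char(class, LOCK_USED_IN_HARDIRQ_READ);
	usage[2] = get_usage_char(class, LOCK_USED_IN_SOFTIRQ);
	usage[3] = get_usage_char(class, LOCK_USED_IN_SOFTIRQ_READ);
	usage[4] = get_usage_char(class, LOCK_USED_IN_RECLAIM_FS);
	usage[5] = get_usage_char(class, LOCK_USED_IN_RECLAIM_FS_READ);
	usage[6] = '\0';

Note the character semantics after this redo: '.' means neither, '-' means the lock was acquired in that context (the lock_flag(bit) test), '+' means it was acquired with that state enabled (the bit + 2 test), and '?' means both.
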
 
 static void print_lock_name(struct lock_class *class)
 {
-       char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+       char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
        const char *name;
 
-       get_usage_chars(class, &c1, &c2, &c3, &c4);
+       get_usage_chars(class, usage);
 
        name = class->name;
        if (!name) {
@@ -510,7 +520,7 @@ static void print_lock_name(struct lock_class *class)
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
-       printk("){%c%c%c%c}", c1, c2, c3, c4);
+       printk("){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
@@ -549,57 +559,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
        }
 }
 
-static void print_lock_class_header(struct lock_class *class, int depth)
-{
-       int bit;
-
-       printk("%*s->", depth, "");
-       print_lock_name(class);
-       printk(" ops: %lu", class->ops);
-       printk(" {\n");
-
-       for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
-               if (class->usage_mask & (1 << bit)) {
-                       int len = depth;
-
-                       len += printk("%*s   %s", depth, "", usage_str[bit]);
-                       len += printk(" at:\n");
-                       print_stack_trace(class->usage_traces + bit, len);
-               }
-       }
-       printk("%*s }\n", depth, "");
-
-       printk("%*s ... key      at: ",depth,"");
-       print_ip_sym((unsigned long)class->key);
-}
-
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void print_lock_dependencies(struct lock_class *class, int depth)
-{
-       struct lock_list *entry;
-
-       if (lockdep_dependency_visit(class, depth))
-               return;
-
-       if (DEBUG_LOCKS_WARN_ON(depth >= 20))
-               return;
-
-       print_lock_class_header(class, depth);
-
-       list_for_each_entry(entry, &class->locks_after, entry) {
-               if (DEBUG_LOCKS_WARN_ON(!entry->class))
-                       return;
-
-               print_lock_dependencies(entry->class, depth + 1);
-
-               printk("%*s ... acquired at:\n",depth,"");
-               print_stack_trace(&entry->trace, 2);
-               printk("\n");
-       }
-}
-
 static void print_kernel_version(void)
 {
        printk("%s %.*s\n", init_utsname()->release,
@@ -633,6 +592,9 @@ static int static_obj(void *obj)
        if ((addr >= start) && (addr < end))
                return 1;
 
+       if (arch_is_kernel_data(addr))
+               return 1;
+
 #ifdef CONFIG_SMP
        /*
         * percpu var?
@@ -792,6 +754,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return NULL;
        }
        class = lock_classes + nr_lock_classes++;
@@ -855,6 +818,7 @@ static struct lock_list *alloc_list_entry(void)
 
                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return NULL;
        }
        return list_entries + nr_list_entries++;
@@ -893,22 +857,203 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 }
 
 /*
+ * For efficiency of the modulo operation, we use a power of 2
+ */
+#define MAX_CIRCULAR_QUEUE_SIZE                4096UL
+#define CQ_MASK                                (MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/*
+ * The circular_queue and helpers are used to implement the
+ * breadth-first search (BFS) algorithm, by which we can build
+ * the shortest path from the next lock to be acquired to a
+ * previously held lock, if there is a cycle between them.
+ */
+struct circular_queue {
+       unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+       unsigned int  front, rear;
+};
+
+static struct circular_queue lock_cq;
+
+unsigned int max_bfs_queue_depth;
+
+static unsigned int lockdep_dependency_gen_id;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+       cq->front = cq->rear = 0;
+       lockdep_dependency_gen_id++;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+       return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+       return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+       if (__cq_full(cq))
+               return -1;
+
+       cq->element[cq->rear] = elem;
+       cq->rear = (cq->rear + 1) & CQ_MASK;
+       return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+       if (__cq_empty(cq))
+               return -1;
+
+       *elem = cq->element[cq->front];
+       cq->front = (cq->front + 1) & CQ_MASK;
+       return 0;
+}
+
+static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
+{
+       return (cq->rear - cq->front) & CQ_MASK;
+}
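
Because the queue size is a power of two, every wrap-around is a single AND with CQ_MASK instead of a division, and the element count falls out of unsigned subtraction. A standalone sketch with a hypothetical 8-slot queue:

	#include <assert.h>

	#define QSIZE	8UL		/* power of two, like 4096UL above */
	#define QMASK	(QSIZE - 1)

	int main(void)
	{
		unsigned int front = 0, rear = 0, i;

		/* one slot is sacrificed so front == rear means "empty" */
		for (i = 0; i < QSIZE - 1; i++)
			rear = (rear + 1) & QMASK;	/* same as % QSIZE */

		assert(((rear + 1) & QMASK) == front);	/* __cq_full() */
		assert(((rear - front) & QMASK) == QSIZE - 1);
		return 0;
	}
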
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+                                       struct lock_list *parent)
+{
+       unsigned long nr;
+
+       nr = lock - list_entries;
+       WARN_ON(nr >= nr_list_entries);
+       lock->parent = parent;
+       lock->class->dep_gen_id = lockdep_dependency_gen_id;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+       unsigned long nr;
+
+       nr = lock - list_entries;
+       WARN_ON(nr >= nr_list_entries);
+       return lock->class->dep_gen_id == lockdep_dependency_gen_id;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+       return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+       int depth = 0;
+       struct lock_list *parent;
+
+       while ((parent = get_lock_parent(child))) {
+               child = parent;
+               depth++;
+       }
+       return depth;
+}
+
+static int __bfs(struct lock_list *source_entry,
+                void *data,
+                int (*match)(struct lock_list *entry, void *data),
+                struct lock_list **target_entry,
+                int forward)
+{
+       struct lock_list *entry;
+       struct list_head *head;
+       struct circular_queue *cq = &lock_cq;
+       int ret = 1;
+
+       if (match(source_entry, data)) {
+               *target_entry = source_entry;
+               ret = 0;
+               goto exit;
+       }
+
+       if (forward)
+               head = &source_entry->class->locks_after;
+       else
+               head = &source_entry->class->locks_before;
+
+       if (list_empty(head))
+               goto exit;
+
+       __cq_init(cq);
+       __cq_enqueue(cq, (unsigned long)source_entry);
+
+       while (!__cq_empty(cq)) {
+               struct lock_list *lock;
+
+               __cq_dequeue(cq, (unsigned long *)&lock);
+
+               if (!lock->class) {
+                       ret = -2;
+                       goto exit;
+               }
+
+               if (forward)
+                       head = &lock->class->locks_after;
+               else
+                       head = &lock->class->locks_before;
+
+               list_for_each_entry(entry, head, entry) {
+                       if (!lock_accessed(entry)) {
+                               unsigned int cq_depth;
+                               mark_lock_accessed(entry, lock);
+                               if (match(entry, data)) {
+                                       *target_entry = entry;
+                                       ret = 0;
+                                       goto exit;
+                               }
+
+                               if (__cq_enqueue(cq, (unsigned long)entry)) {
+                                       ret = -1;
+                                       goto exit;
+                               }
+                               cq_depth = __cq_get_elem_count(cq);
+                               if (max_bfs_queue_depth < cq_depth)
+                                       max_bfs_queue_depth = cq_depth;
+                       }
+               }
+       }
+exit:
+       return ret;
+}
+
+static inline int __bfs_forwards(struct lock_list *src_entry,
+                       void *data,
+                       int (*match)(struct lock_list *entry, void *data),
+                       struct lock_list **target_entry)
+{
+       return __bfs(src_entry, data, match, target_entry, 1);
+
+}
+
+static inline int __bfs_backwards(struct lock_list *src_entry,
+                       void *data,
+                       int (*match)(struct lock_list *entry, void *data),
+                       struct lock_list **target_entry)
+{
+       return __bfs(src_entry, data, match, target_entry, 0);
+
+}
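
The convention these wrappers hand back to their callers: 0 means the match() callback fired and *target_entry points at the hit, 1 means the whole subgraph was searched without a match, and a negative value is an internal error (-1: circular queue overflow, -2: a list entry without a class). A hypothetical caller, modelled on check_usage() further down and using the class_equal() helper introduced below:

	struct lock_list root = { .parent = NULL, .class = some_class };
	struct lock_list *target_entry;
	int ret;

	ret = __bfs_forwards(&root, target_class, class_equal, &target_entry);
	if (ret < 0)		/* -1 or -2: the graph walk went wrong */
		return print_bfs_bug(ret);
	if (ret == 1)		/* exhausted, no match */
		return 1;
	/* ret == 0: target_entry is the match; follow ->parent for the path */
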
+
+/*
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
  * checking.
- *
- * (to keep the stackframe of the recursive functions small we
- *  use these global variables, and we also mark various helper
- *  functions as noinline.)
  */
-static struct held_lock *check_source, *check_target;
 
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
  */
 static noinline int
-print_circular_bug_entry(struct lock_list *target, unsigned int depth)
+print_circular_bug_entry(struct lock_list *target, int depth)
 {
        if (debug_locks_silent)
                return 0;
@@ -925,11 +1070,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
  * header first:
  */
 static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+                       struct held_lock *check_src,
+                       struct held_lock *check_tgt)
 {
        struct task_struct *curr = current;
 
-       if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+       if (debug_locks_silent)
                return 0;
 
        printk("\n=======================================================\n");
@@ -938,9 +1085,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
        printk(  "-------------------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, task_pid_nr(curr));
-       print_lock(check_source);
+       print_lock(check_src);
        printk("\nbut task is already holding lock:\n");
-       print_lock(check_target);
+       print_lock(check_tgt);
        printk("\nwhich lock already depends on the new lock.\n\n");
        printk("\nthe existing dependency chain (in reverse order) is:\n");
 
@@ -949,19 +1096,36 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
        return 0;
 }
 
-static noinline int print_circular_bug_tail(void)
+static inline int class_equal(struct lock_list *entry, void *data)
+{
+       return entry->class == data;
+}
+
+static noinline int print_circular_bug(struct lock_list *this,
+                               struct lock_list *target,
+                               struct held_lock *check_src,
+                               struct held_lock *check_tgt)
 {
        struct task_struct *curr = current;
-       struct lock_list this;
+       struct lock_list *parent;
+       int depth;
 
-       if (debug_locks_silent)
+       if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;
 
-       this.class = hlock_class(check_source);
-       if (!save_trace(&this.trace))
+       if (!save_trace(&this->trace))
                return 0;
 
-       print_circular_bug_entry(&this, 0);
+       depth = get_lock_depth(target);
+
+       print_circular_bug_header(target, depth, check_src, check_tgt);
+
+       parent = get_lock_parent(target);
+
+       while (parent) {
+               print_circular_bug_entry(parent, --depth);
+               parent = get_lock_parent(parent);
+       }
 
        printk("\nother info that might help us debug this:\n\n");
        lockdep_print_held_locks(curr);
@@ -972,74 +1136,70 @@ static noinline int print_circular_bug_tail(void)
        return 0;
 }
 
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
+static noinline int print_bfs_bug(int ret)
 {
        if (!debug_locks_off_graph_unlock())
                return 0;
 
-       WARN_ON(1);
+       WARN(1, "lockdep bfs error:%d\n", ret);
 
        return 0;
 }
 
-unsigned long __lockdep_count_forward_deps(struct lock_class *class,
-                                          unsigned int depth)
+static int noop_count(struct lock_list *entry, void *data)
 {
-       struct lock_list *entry;
-       unsigned long ret = 1;
+       (*(unsigned long *)data)++;
+       return 0;
+}
 
-       if (lockdep_dependency_visit(class, depth))
-               return 0;
+unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+{
+       unsigned long  count = 0;
+       struct lock_list *uninitialized_var(target_entry);
 
-       /*
-        * Recurse this class's dependency list:
-        */
-       list_for_each_entry(entry, &class->locks_after, entry)
-               ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+       __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
 
-       return ret;
+       return count;
 }
-
 unsigned long lockdep_count_forward_deps(struct lock_class *class)
 {
        unsigned long ret, flags;
+       struct lock_list this;
+
+       this.parent = NULL;
+       this.class = class;
 
        local_irq_save(flags);
-       __raw_spin_lock(&lockdep_lock);
-       ret = __lockdep_count_forward_deps(class, 0);
-       __raw_spin_unlock(&lockdep_lock);
+       arch_spin_lock(&lockdep_lock);
+       ret = __lockdep_count_forward_deps(&this);
+       arch_spin_unlock(&lockdep_lock);
        local_irq_restore(flags);
 
        return ret;
 }
 
-unsigned long __lockdep_count_backward_deps(struct lock_class *class,
-                                           unsigned int depth)
+unsigned long __lockdep_count_backward_deps(struct lock_list *this)
 {
-       struct lock_list *entry;
-       unsigned long ret = 1;
+       unsigned long  count = 0;
+       struct lock_list *uninitialized_var(target_entry);
 
-       if (lockdep_dependency_visit(class, depth))
-               return 0;
-       /*
-        * Recurse this class's dependency list:
-        */
-       list_for_each_entry(entry, &class->locks_before, entry)
-               ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+       __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
 
-       return ret;
+       return count;
 }
 
 unsigned long lockdep_count_backward_deps(struct lock_class *class)
 {
        unsigned long ret, flags;
+       struct lock_list this;
+
+       this.parent = NULL;
+       this.class = class;
 
        local_irq_save(flags);
-       __raw_spin_lock(&lockdep_lock);
-       ret = __lockdep_count_backward_deps(class, 0);
-       __raw_spin_unlock(&lockdep_lock);
+       arch_spin_lock(&lockdep_lock);
+       ret = __lockdep_count_backward_deps(&this);
+       arch_spin_unlock(&lockdep_lock);
        local_irq_restore(flags);
 
        return ret;
@@ -1050,29 +1210,16 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  * lead to <target>. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+               struct lock_list **target_entry)
 {
-       struct lock_list *entry;
+       int result;
 
-       if (lockdep_dependency_visit(source, depth))
-               return 1;
+       debug_atomic_inc(&nr_cyclic_checks);
 
-       debug_atomic_inc(&nr_cyclic_check_recursions);
-       if (depth > max_recursion_depth)
-               max_recursion_depth = depth;
-       if (depth >= RECURSION_LIMIT)
-               return print_infinite_recursion_bug();
-       /*
-        * Check this lock's dependency list:
-        */
-       list_for_each_entry(entry, &source->locks_after, entry) {
-               if (entry->class == hlock_class(check_target))
-                       return print_circular_bug_header(entry, depth+1);
-               debug_atomic_inc(&nr_cyclic_checks);
-               if (!check_noncircular(entry->class, depth+1))
-                       return print_circular_bug_entry(entry, depth+1);
-       }
-       return 1;
+       result = __bfs_forwards(root, target, class_equal, target_entry);
+
+       return result;
 }
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
@@ -1081,103 +1228,121 @@ check_noncircular(struct lock_class *source, unsigned int depth)
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static enum lock_usage_bit find_usage_bit;
-static struct lock_class *forwards_match, *backwards_match;
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+       return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
+
 
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+                       struct lock_list **target_entry)
 {
-       struct lock_list *entry;
-       int ret;
-
-       if (lockdep_dependency_visit(source, depth))
-               return 1;
-
-       if (depth > max_recursion_depth)
-               max_recursion_depth = depth;
-       if (depth >= RECURSION_LIMIT)
-               return print_infinite_recursion_bug();
+       int result;
 
        debug_atomic_inc(&nr_find_usage_forwards_checks);
-       if (source->usage_mask & (1 << find_usage_bit)) {
-               forwards_match = source;
-               return 2;
-       }
 
-       /*
-        * Check this lock's dependency list:
-        */
-       list_for_each_entry(entry, &source->locks_after, entry) {
-               debug_atomic_inc(&nr_find_usage_forwards_recursions);
-               ret = find_usage_forwards(entry->class, depth+1);
-               if (ret == 2 || ret == 0)
-                       return ret;
-       }
-       return 1;
+       result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+       return result;
 }
 
 /*
  * Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+                       struct lock_list **target_entry)
 {
-       struct lock_list *entry;
-       int ret;
+       int result;
 
-       if (lockdep_dependency_visit(source, depth))
-               return 1;
+       debug_atomic_inc(&nr_find_usage_backwards_checks);
 
-       if (!__raw_spin_is_locked(&lockdep_lock))
-               return DEBUG_LOCKS_WARN_ON(1);
+       result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
 
-       if (depth > max_recursion_depth)
-               max_recursion_depth = depth;
-       if (depth >= RECURSION_LIMIT)
-               return print_infinite_recursion_bug();
+       return result;
+}
 
-       debug_atomic_inc(&nr_find_usage_backwards_checks);
-       if (source->usage_mask & (1 << find_usage_bit)) {
-               backwards_match = source;
-               return 2;
-       }
+static void print_lock_class_header(struct lock_class *class, int depth)
+{
+       int bit;
 
-       if (!source && debug_locks_off_graph_unlock()) {
-               WARN_ON(1);
-               return 0;
-       }
+       printk("%*s->", depth, "");
+       print_lock_name(class);
+       printk(" ops: %lu", class->ops);
+       printk(" {\n");
 
-       /*
-        * Check this lock's dependency list:
-        */
-       list_for_each_entry(entry, &source->locks_before, entry) {
-               debug_atomic_inc(&nr_find_usage_backwards_recursions);
-               ret = find_usage_backwards(entry->class, depth+1);
-               if (ret == 2 || ret == 0)
-                       return ret;
+       for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+               if (class->usage_mask & (1 << bit)) {
+                       int len = depth;
+
+                       len += printk("%*s   %s", depth, "", usage_str[bit]);
+                       len += printk(" at:\n");
+                       print_stack_trace(class->usage_traces + bit, len);
+               }
        }
-       return 1;
+       printk("%*s }\n", depth, "");
+
+       printk("%*s ... key      at: ",depth,"");
+       print_ip_sym((unsigned long)class->key);
+}
+
+/*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+                               struct lock_list *root)
+{
+       struct lock_list *entry = leaf;
+       int depth;
+
+       /* compute the depth from the BFS-generated tree */
+       depth = get_lock_depth(leaf);
+
+       do {
+               print_lock_class_header(entry->class, depth);
+               printk("%*s ... acquired at:\n", depth, "");
+               print_stack_trace(&entry->trace, 2);
+               printk("\n");
+
+               if (depth == 0 && (entry != root)) {
+                       printk("lockdep:%s bad BFS generated tree\n", __func__);
+                       break;
+               }
+
+               entry = get_lock_parent(entry);
+               depth--;
+       } while (entry && (depth >= 0));
+
+       return;
 }
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
+                        struct lock_list *prev_root,
+                        struct lock_list *next_root,
+                        struct lock_list *backwards_entry,
+                        struct lock_list *forwards_entry,
                         struct held_lock *prev,
                         struct held_lock *next,
                         enum lock_usage_bit bit1,
@@ -1210,26 +1375,32 @@ print_bad_irq_dependency(struct task_struct *curr,
 
        printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
                irqclass);
-       print_lock_name(backwards_match);
+       print_lock_name(backwards_entry->class);
        printk("\n... which became %s-irq-safe at:\n", irqclass);
 
-       print_stack_trace(backwards_match->usage_traces + bit1, 1);
+       print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
        printk("\nto a %s-irq-unsafe lock:\n", irqclass);
-       print_lock_name(forwards_match);
+       print_lock_name(forwards_entry->class);
        printk("\n... which became %s-irq-unsafe at:\n", irqclass);
        printk("...");
 
-       print_stack_trace(forwards_match->usage_traces + bit2, 1);
+       print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
        printk("\nother info that might help us debug this:\n\n");
        lockdep_print_held_locks(curr);
 
-       printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
-       print_lock_dependencies(backwards_match, 0);
+       printk("\nthe dependencies between %s-irq-safe lock", irqclass);
+       printk(" and the holding lock:\n");
+       if (!save_trace(&prev_root->trace))
+               return 0;
+       print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-       printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
-       print_lock_dependencies(forwards_match, 0);
+       printk("\nthe dependencies between the lock to be acquired");
+       printk(" and %s-irq-unsafe lock:\n", irqclass);
+       if (!save_trace(&next_root->trace))
+               return 0;
+       print_shortest_lock_dependencies(forwards_entry, next_root);
 
        printk("\nstack backtrace:\n");
        dump_stack();
@@ -1243,25 +1414,76 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
            enum lock_usage_bit bit_forwards, const char *irqclass)
 {
        int ret;
+       struct lock_list this, that;
+       struct lock_list *uninitialized_var(target_entry);
+       struct lock_list *uninitialized_var(target_entry1);
 
-       find_usage_bit = bit_backwards;
-       /* fills in <backwards_match> */
-       ret = find_usage_backwards(hlock_class(prev), 0);
-       if (!ret || ret == 1)
+       this.parent = NULL;
+
+       this.class = hlock_class(prev);
+       ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+       if (ret < 0)
+               return print_bfs_bug(ret);
+       if (ret == 1)
                return ret;
 
-       find_usage_bit = bit_forwards;
-       ret = find_usage_forwards(hlock_class(next), 0);
-       if (!ret || ret == 1)
+       that.parent = NULL;
+       that.class = hlock_class(next);
+       ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+       if (ret < 0)
+               return print_bfs_bug(ret);
+       if (ret == 1)
                return ret;
-       /* ret == 2 */
-       return print_bad_irq_dependency(curr, prev, next,
+
+       return print_bad_irq_dependency(curr, &this, &that,
+                       target_entry, target_entry1,
+                       prev, next,
                        bit_backwards, bit_forwards, irqclass);
 }
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-               struct held_lock *next)
+static const char *state_names[] = {
+#define LOCKDEP_STATE(__STATE) \
+       __stringify(__STATE),
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static const char *state_rnames[] = {
+#define LOCKDEP_STATE(__STATE) \
+       __stringify(__STATE)"-READ",
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline const char *state_name(enum lock_usage_bit bit)
+{
+       return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+}
+
+static int exclusive_bit(int new_bit)
+{
+       /*
+        * USED_IN
+        * USED_IN_READ
+        * ENABLED
+        * ENABLED_READ
+        *
+        * bit 0 - write/read
+        * bit 1 - used_in/enabled
+        * bit 2+  state
+        */
+
+       int state = new_bit & ~3;
+       int dir = new_bit & 2;
+
+       /*
+        * keep state, bit flip the direction and strip read.
+        */
+       return state | (dir ^ 2);
+}
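
A standalone mirror of this encoding, with illustrative enum values that follow the comment above (USED_IN, USED_IN_READ, ENABLED, ENABLED_READ per state; not copied from the kernel headers):

	#include <assert.h>

	enum {
		USED_IN_HARDIRQ,	/* 0: state 0, dir 0, write */
		USED_IN_HARDIRQ_READ,	/* 1: state 0, dir 0, read  */
		ENABLED_HARDIRQ,	/* 2: state 0, dir 1, write */
		ENABLED_HARDIRQ_READ,	/* 3: state 0, dir 1, read  */
		USED_IN_SOFTIRQ,	/* 4: next state block      */
	};

	static int excl(int bit) { return (bit & ~3) | ((bit & 2) ^ 2); }

	int main(void)
	{
		assert(excl(USED_IN_HARDIRQ)      == ENABLED_HARDIRQ);
		assert(excl(USED_IN_HARDIRQ_READ) == ENABLED_HARDIRQ);	/* read stripped */
		assert(excl(ENABLED_HARDIRQ)      == USED_IN_HARDIRQ);
		assert(excl(ENABLED_HARDIRQ_READ) == USED_IN_HARDIRQ);
		return 0;
	}
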
+
+static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
+                          struct held_lock *next, enum lock_usage_bit bit)
 {
        /*
         * Prove that the new dependency does not connect a hardirq-safe
@@ -1269,38 +1491,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
-                                       LOCK_ENABLED_HARDIRQS, "hard"))
+       if (!check_usage(curr, prev, next, bit,
+                          exclusive_bit(bit), state_name(bit)))
                return 0;
 
+       bit++; /* _READ */
+
        /*
         * Prove that the new dependency does not connect a hardirq-safe-read
         * lock with a hardirq-unsafe lock - to achieve this we search
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
-                                       LOCK_ENABLED_HARDIRQS, "hard-read"))
+       if (!check_usage(curr, prev, next, bit,
+                          exclusive_bit(bit), state_name(bit)))
                return 0;
 
-       /*
-        * Prove that the new dependency does not connect a softirq-safe
-        * lock with a softirq-unsafe lock - to achieve this we search
-        * the backwards-subgraph starting at <prev>, and the
-        * forwards-subgraph starting at <next>:
-        */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
-                                       LOCK_ENABLED_SOFTIRQS, "soft"))
-               return 0;
-       /*
-        * Prove that the new dependency does not connect a softirq-safe-read
-        * lock with a softirq-unsafe lock - to achieve this we search
-        * the backwards-subgraph starting at <prev>, and the
-        * forwards-subgraph starting at <next>:
-        */
-       if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
-                                       LOCK_ENABLED_SOFTIRQS, "soft"))
+       return 1;
+}
+
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+               struct held_lock *next)
+{
+#define LOCKDEP_STATE(__STATE)                                         \
+       if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
                return 0;
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 
        return 1;
 }
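
For the three states present at this point the x-macro expands to (illustrative):

	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ))
		return 0;
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ))
		return 0;
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS))
		return 0;

Each call also covers the corresponding _READ variant, via the bit++ inside check_irq_usage().
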
@@ -1431,6 +1649,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 {
        struct lock_list *entry;
        int ret;
+       struct lock_list this;
+       struct lock_list *uninitialized_var(target_entry);
 
        /*
         * Prove that the new <prev> -> <next> dependency would not
@@ -1441,10 +1661,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         * We are using global variables to control the recursion, to
         * keep the stackframe size of the recursive functions low:
         */
-       check_source = next;
-       check_target = prev;
-       if (!(check_noncircular(hlock_class(next), 0)))
-               return print_circular_bug_tail();
+       this.class = hlock_class(next);
+       this.parent = NULL;
+       ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+       if (unlikely(!ret))
+               return print_circular_bug(&this, target_entry, next, prev);
+       else if (unlikely(ret < 0))
+               return print_bfs_bug(ret);
 
        if (!check_prev_add_irq(curr, prev, next))
                return 0;
@@ -1645,6 +1868,7 @@ cache_hit:
 
                printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
        chain = lock_chains + nr_lock_chains++;
@@ -1842,7 +2066,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
  * print irq inversion bug:
  */
 static int
-print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
+print_irq_inversion_bug(struct task_struct *curr,
+                       struct lock_list *root, struct lock_list *other,
                        struct held_lock *this, int forwards,
                        const char *irqclass)
 {
@@ -1857,20 +2082,19 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
                curr->comm, task_pid_nr(curr));
        print_lock(this);
        if (forwards)
-               printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
+               printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
        else
-               printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
-       print_lock_name(other);
+               printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
+       print_lock_name(other->class);
        printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
        printk("\nother info that might help us debug this:\n");
        lockdep_print_held_locks(curr);
 
-       printk("\nthe first lock's dependencies:\n");
-       print_lock_dependencies(hlock_class(this), 0);
-
-       printk("\nthe second lock's dependencies:\n");
-       print_lock_dependencies(other, 0);
+       printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+       if (!save_trace(&root->trace))
+               return 0;
+       print_shortest_lock_dependencies(other, root);
 
        printk("\nstack backtrace:\n");
        dump_stack();
@@ -1887,14 +2111,19 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
                     enum lock_usage_bit bit, const char *irqclass)
 {
        int ret;
-
-       find_usage_bit = bit;
-       /* fills in <forwards_match> */
-       ret = find_usage_forwards(hlock_class(this), 0);
-       if (!ret || ret == 1)
+       struct lock_list root;
+       struct lock_list *uninitialized_var(target_entry);
+
+       root.parent = NULL;
+       root.class = hlock_class(this);
+       ret = find_usage_forwards(&root, bit, &target_entry);
+       if (ret < 0)
+               return print_bfs_bug(ret);
+       if (ret == 1)
                return ret;
 
-       return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+       return print_irq_inversion_bug(curr, &root, target_entry,
+                                       this, 1, irqclass);
 }
 
 /*
@@ -1906,14 +2135,19 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit bit, const char *irqclass)
 {
        int ret;
-
-       find_usage_bit = bit;
-       /* fills in <backwards_match> */
-       ret = find_usage_backwards(hlock_class(this), 0);
-       if (!ret || ret == 1)
+       struct lock_list root;
+       struct lock_list *uninitialized_var(target_entry);
+
+       root.parent = NULL;
+       root.class = hlock_class(this);
+       ret = find_usage_backwards(&root, bit, &target_entry);
+       if (ret < 0)
+               return print_bfs_bug(ret);
+       if (ret == 1)
                return ret;
 
-       return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+       return print_irq_inversion_bug(curr, &root, target_entry,
+                                       this, 0, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)
@@ -1929,7 +2163,7 @@ void print_irqtrace_events(struct task_struct *curr)
        print_ip_sym(curr->softirq_disable_ip);
 }
 
-static int hardirq_verbose(struct lock_class *class)
+static int HARDIRQ_verbose(struct lock_class *class)
 {
 #if HARDIRQ_VERBOSE
        return class_filter(class);
@@ -1937,7 +2171,7 @@ static int hardirq_verbose(struct lock_class *class)
        return 0;
 }
 
-static int softirq_verbose(struct lock_class *class)
+static int SOFTIRQ_verbose(struct lock_class *class)
 {
 #if SOFTIRQ_VERBOSE
        return class_filter(class);
@@ -1945,185 +2179,95 @@ static int softirq_verbose(struct lock_class *class)
        return 0;
 }
 
+static int RECLAIM_FS_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+       return class_filter(class);
+#endif
+       return 0;
+}
+
 #define STRICT_READ_CHECKS     1
 
-static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+static int (*state_verbose_f[])(struct lock_class *class) = {
+#define LOCKDEP_STATE(__STATE) \
+       __STATE##_verbose,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline int state_verbose(enum lock_usage_bit bit,
+                               struct lock_class *class)
+{
+       return state_verbose_f[bit >> 2](class);
+}
+
+typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
+                            enum lock_usage_bit bit, const char *name);
+
+static int
+mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                enum lock_usage_bit new_bit)
 {
-       int ret = 1;
+       int excl_bit = exclusive_bit(new_bit);
+       int read = new_bit & 1;
+       int dir = new_bit & 2;
 
-       switch(new_bit) {
-       case LOCK_USED_IN_HARDIRQ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_ENABLED_HARDIRQS_READ))
-                       return 0;
-               /*
-                * just marked it hardirq-safe, check that this lock
-                * took no hardirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_HARDIRQS, "hard"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-safe, check that this lock
-                * took no hardirq-unsafe-read lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                               LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
-                       return 0;
-#endif
-               if (hardirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_SOFTIRQ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_ENABLED_SOFTIRQS_READ))
-                       return 0;
-               /*
-                * just marked it softirq-safe, check that this lock
-                * took no softirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_SOFTIRQS, "soft"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-safe, check that this lock
-                * took no softirq-unsafe-read lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                               LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
-                       return 0;
-#endif
-               if (softirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_HARDIRQ_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-                       return 0;
-               /*
-                * just marked it hardirq-read-safe, check that this lock
-                * took no hardirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_HARDIRQS, "hard"))
-                       return 0;
-               if (hardirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_USED_IN_SOFTIRQ_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-                       return 0;
-               /*
-                * just marked it softirq-read-safe, check that this lock
-                * took no softirq-unsafe lock in the past:
-                */
-               if (!check_usage_forwards(curr, this,
-                                         LOCK_ENABLED_SOFTIRQS, "soft"))
-                       return 0;
-               if (softirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_HARDIRQS:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_USED_IN_HARDIRQ_READ))
-                       return 0;
-               /*
-                * just marked it hardirq-unsafe, check that no hardirq-safe
-                * lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_HARDIRQ, "hard"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-unsafe, check that no
-                * hardirq-safe-read lock in the system ever took
-                * it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                  LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
-                       return 0;
-#endif
-               if (hardirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_SOFTIRQS:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
-                       return 0;
-               if (!valid_state(curr, this, new_bit,
-                                LOCK_USED_IN_SOFTIRQ_READ))
-                       return 0;
-               /*
-                * just marked it softirq-unsafe, check that no softirq-safe
-                * lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_SOFTIRQ, "soft"))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-unsafe, check that no
-                * softirq-safe-read lock in the system ever took
-                * it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                  LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
-                       return 0;
-#endif
-               if (softirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_HARDIRQS_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-                       return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it hardirq-read-unsafe, check that no
-                * hardirq-safe lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_HARDIRQ, "hard"))
-                       return 0;
-#endif
-               if (hardirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       case LOCK_ENABLED_SOFTIRQS_READ:
-               if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
+       /*
+        * mark USED_IN has to look forwards -- to ensure no dependency
+        * has ENABLED state, which would allow recursion deadlocks.
+        *
+        * mark ENABLED has to look backwards -- to ensure no dependee
+        * has USED_IN state, which, again, would allow recursion deadlocks.
+        */
+       check_usage_f usage = dir ?
+               check_usage_backwards : check_usage_forwards;
+
+       /*
+        * Validate that this particular lock does not have conflicting
+        * usage states.
+        */
+       if (!valid_state(curr, this, new_bit, excl_bit))
+               return 0;
+
+       /*
+        * Validate that the lock dependencies don't have conflicting usage
+        * states.
+        */
+       if ((!read || !dir || STRICT_READ_CHECKS) &&
+                       !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
+               return 0;
+
+       /*
+        * Check for read in write conflicts
+        */
+       if (!read) {
+               if (!valid_state(curr, this, new_bit, excl_bit + 1))
                        return 0;
-#if STRICT_READ_CHECKS
-               /*
-                * just marked it softirq-read-unsafe, check that no
-                * softirq-safe lock in the system ever took it in the past:
-                */
-               if (!check_usage_backwards(curr, this,
-                                          LOCK_USED_IN_SOFTIRQ, "soft"))
+
+               if (STRICT_READ_CHECKS &&
+                       !usage(curr, this, excl_bit + 1,
+                               state_name(new_bit + 1)))
                        return 0;
-#endif
-               if (softirq_verbose(hlock_class(this)))
-                       ret = 2;
-               break;
-       default:
-               WARN_ON(1);
-               break;
        }
 
-       return ret;
+       if (state_verbose(new_bit, hlock_class(this)))
+               return 2;
+
+       return 1;
 }
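
The arithmetic above (`new_bit & ~1', `excl_bit + 1', and the `2 + (mark << 2)'
computation further down) implies a packed usage-bit layout. A minimal userspace
sketch of that assumed layout -- bit 0 selects the _READ variant, bit 1 selects
ENABLED vs. USED_IN, and the remaining bits index the state -- not the actual
kernel headers:

	#include <stdio.h>

	#define USAGE_READ_MASK  1	/* bit 0: _READ variant              */
	#define USAGE_DIR_MASK   2	/* bit 1: ENABLED (1) vs. USED_IN (0) */

	int main(void)
	{
		static const char *state[] = { "HARDIRQ", "SOFTIRQ", "RECLAIM_FS" };
		int new_bit;

		for (new_bit = 0; new_bit < 12; new_bit++) {
			int read = new_bit & USAGE_READ_MASK;
			int dir  = new_bit & USAGE_DIR_MASK;
			/* exclusive state: same state index, opposite
			 * direction, non-read -- what valid_state() is
			 * handed as excl_bit: */
			int excl = (new_bit & ~USAGE_READ_MASK) ^ USAGE_DIR_MASK;

			printf("LOCK_%s_%s%s = %2d  (exclusive: %2d)\n",
			       dir ? "ENABLED" : "USED_IN", state[new_bit >> 2],
			       read ? "_READ" : "", new_bit, excl);
		}
		return 0;
	}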
 
+enum mark_type {
+#define LOCKDEP_STATE(__STATE) __STATE,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
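
The enum is generated with an x-macro. Assuming lockdep_states.h lists the
states this patch references elsewhere (HARDIRQ, SOFTIRQ, RECLAIM_FS), the
include expands to:

	/* lockdep_states.h (assumed contents):
	 *	LOCKDEP_STATE(HARDIRQ)
	 *	LOCKDEP_STATE(SOFTIRQ)
	 *	LOCKDEP_STATE(RECLAIM_FS)
	 * so the preprocessor produces: */
	enum mark_type {
		HARDIRQ,
		SOFTIRQ,
		RECLAIM_FS,
	};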
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
        enum lock_usage_bit usage_bit;
        struct held_lock *hlock;
@@ -2132,17 +2276,12 @@ mark_held_locks(struct task_struct *curr, int hardirq)
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
 
-               if (hardirq) {
-                       if (hlock->read)
-                               usage_bit = LOCK_ENABLED_HARDIRQS_READ;
-                       else
-                               usage_bit = LOCK_ENABLED_HARDIRQS;
-               } else {
-                       if (hlock->read)
-                               usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
-                       else
-                               usage_bit = LOCK_ENABLED_SOFTIRQS;
-               }
+               usage_bit = 2 + (mark << 2); /* ENABLED */
+               if (hlock->read)
+                       usage_bit += 1; /* READ */
+
+               BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+
                if (!mark_lock(curr, hlock, usage_bit))
                        return 0;
        }
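
To make the usage_bit computation concrete (assuming the enum lock_usage_bit
ordering USED_IN, USED_IN_READ, ENABLED, ENABLED_READ within each per-state
block of four):

	/* mark = HARDIRQ (0), write hold:  2 + (0 << 2)     = 2 == LOCK_ENABLED_HARDIRQ
	 * mark = HARDIRQ (0), read hold:   2 + (0 << 2) + 1 = 3 == LOCK_ENABLED_HARDIRQ_READ
	 * mark = SOFTIRQ (1), write hold:  2 + (1 << 2)     = 6 == LOCK_ENABLED_SOFTIRQ
	 * mark = SOFTIRQ (1), read hold:   2 + (1 << 2) + 1 = 7 == LOCK_ENABLED_SOFTIRQ_READ
	 */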
@@ -2169,12 +2308,11 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
        struct task_struct *curr = current;
-       unsigned long ip;
 
-       time_hardirqs_on(CALLER_ADDR0, a0);
+       time_hardirqs_on(CALLER_ADDR0, ip);
 
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
@@ -2188,7 +2326,6 @@ void trace_hardirqs_on_caller(unsigned long a0)
        }
        /* we'll do an OFF -> ON transition: */
        curr->hardirqs_enabled = 1;
-       ip = (unsigned long) __builtin_return_address(0);
 
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
@@ -2198,7 +2335,7 @@ void trace_hardirqs_on_caller(unsigned long a0)
         * We are going to turn hardirqs on, so set the
         * usage bit for all held locks:
         */
-       if (!mark_held_locks(curr, 1))
+       if (!mark_held_locks(curr, HARDIRQ))
                return;
        /*
         * If we have softirqs enabled, then set the usage
@@ -2206,7 +2343,7 @@ void trace_hardirqs_on_caller(unsigned long a0)
         * this bit from being set before)
         */
        if (curr->softirqs_enabled)
-               if (!mark_held_locks(curr, 0))
+               if (!mark_held_locks(curr, SOFTIRQ))
                        return;
 
        curr->hardirq_enable_ip = ip;
@@ -2224,11 +2361,11 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
        struct task_struct *curr = current;
 
-       time_hardirqs_off(CALLER_ADDR0, a0);
+       time_hardirqs_off(CALLER_ADDR0, ip);
 
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
@@ -2241,7 +2378,7 @@ void trace_hardirqs_off_caller(unsigned long a0)
                 * We have done an ON -> OFF transition:
                 */
                curr->hardirqs_enabled = 0;
-               curr->hardirq_disable_ip = _RET_IP_;
+               curr->hardirq_disable_ip = ip;
                curr->hardirq_disable_event = ++curr->irq_events;
                debug_atomic_inc(&hardirqs_off_events);
        } else
@@ -2286,7 +2423,7 @@ void trace_softirqs_on(unsigned long ip)
         * enabled too:
         */
        if (curr->hardirqs_enabled)
-               mark_held_locks(curr, 0);
+               mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2315,6 +2452,48 @@ void trace_softirqs_off(unsigned long ip)
                debug_atomic_inc(&redundant_softirqs_off);
 }
 
+static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
+{
+       struct task_struct *curr = current;
+
+       if (unlikely(!debug_locks))
+               return;
+
+       /* no reclaim unless the allocation is allowed to wait */
+       if (!(gfp_mask & __GFP_WAIT))
+               return;
+
+       /* this task won't enter reclaim */
+       if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+               return;
+
+       /* We're only interested in __GFP_FS allocations for now */
+       if (!(gfp_mask & __GFP_FS))
+               return;
+
+       if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
+               return;
+
+       mark_held_locks(curr, RECLAIM_FS);
+}
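
In terms of the usual gfp compositions of this era (a hedged reading of the
filters above, not a new definition):

	/* GFP_KERNEL == __GFP_WAIT | __GFP_IO | __GFP_FS  -> marks RECLAIM_FS
	 * GFP_NOFS   == __GFP_WAIT | __GFP_IO             -> skipped: no __GFP_FS
	 * GFP_ATOMIC == __GFP_HIGH                         -> skipped: no __GFP_WAIT
	 */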
+
+static void check_flags(unsigned long flags);
+
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+       unsigned long flags;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       check_flags(flags);
+       current->lockdep_recursion = 1;
+       __lockdep_trace_alloc(gfp_mask, flags);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+
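A hedged sketch of the intended call site: an allocator entry point invokes the
hook before taking any allocator-internal locks (my_alloc() and backend_alloc()
are hypothetical names, not kernel APIs):

	/* hypothetical backend, declared only for illustration: */
	static void *backend_alloc(size_t size, gfp_t gfp_mask);

	static void *my_alloc(size_t size, gfp_t gfp_mask)
	{
		/* check currently-held locks against reclaim recursion: */
		lockdep_trace_alloc(gfp_mask);
		return backend_alloc(size, gfp_mask);
	}
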
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
        /*
@@ -2343,19 +2522,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
        if (!hlock->hardirqs_off) {
                if (hlock->read) {
                        if (!mark_lock(curr, hlock,
-                                       LOCK_ENABLED_HARDIRQS_READ))
+                                       LOCK_ENABLED_HARDIRQ_READ))
                                return 0;
                        if (curr->softirqs_enabled)
                                if (!mark_lock(curr, hlock,
-                                               LOCK_ENABLED_SOFTIRQS_READ))
+                                               LOCK_ENABLED_SOFTIRQ_READ))
                                        return 0;
                } else {
                        if (!mark_lock(curr, hlock,
-                                       LOCK_ENABLED_HARDIRQS))
+                                       LOCK_ENABLED_HARDIRQ))
                                return 0;
                        if (curr->softirqs_enabled)
                                if (!mark_lock(curr, hlock,
-                                               LOCK_ENABLED_SOFTIRQS))
+                                               LOCK_ENABLED_SOFTIRQ))
+                                       return 0;
+               }
+       }
+
+       /*
+        * We reuse the irq context infrastructure more broadly as general
+        * context checking code. Here it tests for GFP_FS recursion: a lock
+        * taken during reclaim for a GFP_FS allocation must not also be held
+        * over a GFP_FS allocation.
+        */
+       if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+               if (hlock->read) {
+                       if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+                                       return 0;
+               } else {
+                       if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
                                        return 0;
                }
        }
@@ -2410,6 +2605,10 @@ static inline int separate_irq_context(struct task_struct *curr,
        return 0;
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+}
+
 #endif
 
 /*
@@ -2443,14 +2642,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
                return 0;
 
        switch (new_bit) {
-       case LOCK_USED_IN_HARDIRQ:
-       case LOCK_USED_IN_SOFTIRQ:
-       case LOCK_USED_IN_HARDIRQ_READ:
-       case LOCK_USED_IN_SOFTIRQ_READ:
-       case LOCK_ENABLED_HARDIRQS:
-       case LOCK_ENABLED_SOFTIRQS:
-       case LOCK_ENABLED_HARDIRQS_READ:
-       case LOCK_ENABLED_SOFTIRQS_READ:
+#define LOCKDEP_STATE(__STATE)                 \
+       case LOCK_USED_IN_##__STATE:            \
+       case LOCK_USED_IN_##__STATE##_READ:     \
+       case LOCK_ENABLED_##__STATE:            \
+       case LOCK_ENABLED_##__STATE##_READ:
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
                ret = mark_lock_irq(curr, this, new_bit);
                if (!ret)
                        return 0;
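
The x-macro emits the same four case labels per state that the removed lines
spelled out by hand (with the names de-pluralized); for HARDIRQ alone the
preprocessor produces:

	case LOCK_USED_IN_HARDIRQ:
	case LOCK_USED_IN_HARDIRQ_READ:
	case LOCK_ENABLED_HARDIRQ:
	case LOCK_ENABLED_HARDIRQ_READ:

The gain is that a new state such as RECLAIM_FS is picked up automatically,
without touching this switch again.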
@@ -2486,13 +2684,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       if (unlikely(!debug_locks))
+       lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+       lock->cpu = raw_smp_processor_id();
+#endif
+
+       if (DEBUG_LOCKS_WARN_ON(!name)) {
+               lock->name = "NULL";
                return;
+       }
+
+       lock->name = name;
 
        if (DEBUG_LOCKS_WARN_ON(!key))
                return;
-       if (DEBUG_LOCKS_WARN_ON(!name))
-               return;
        /*
         * Sanity check, the lock-class key must be persistent:
         */
@@ -2501,16 +2706,14 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
                DEBUG_LOCKS_WARN_ON(1);
                return;
        }
-       lock->name = name;
        lock->key = key;
-       lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-       lock->cpu = raw_smp_processor_id();
-#endif
+
+       if (unlikely(!debug_locks))
+               return;
+
        if (subclass)
                register_lock_class(lock, subclass, 1);
 }
-
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
 /*
@@ -2519,13 +2722,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check, int hardirqs_off,
-                         struct lockdep_map *nest_lock, unsigned long ip)
+                         struct lockdep_map *nest_lock, unsigned long ip,
+                         int references)
 {
        struct task_struct *curr = current;
        struct lock_class *class = NULL;
        struct held_lock *hlock;
        unsigned int depth, id;
        int chain_head = 0;
+       int class_idx;
        u64 chain_key;
 
        if (!prove_locking)
@@ -2541,6 +2746,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                debug_locks_off();
                printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
 
@@ -2572,10 +2778,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
                return 0;
 
+       class_idx = class - lock_classes + 1;
+
+       if (depth) {
+               hlock = curr->held_locks + depth - 1;
+               if (hlock->class_idx == class_idx && nest_lock) {
+                       if (hlock->references)
+                               hlock->references++;
+                       else
+                               hlock->references = 2;
+
+                       return 1;
+               }
+       }
+
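Why references jumps straight to 2 on the first repeat acquisition: the count
covers every hold, including the original one recorded with references == 0.
A hedged trace (&m and &nest are hypothetical maps):

	/* lock_acquire(&m, ..., &nest, ...);  new hlock, references == 0
	 * lock_acquire(&m, ..., &nest, ...);  top-of-stack class matches:
	 *                                     references = 2  (both holds)
	 * lock_acquire(&m, ..., &nest, ...);  references = 3
	 * lock_release(&m, ...);              references = 2, stack unchanged
	 * lock_release(&m, ...);              references = 1, stack unchanged
	 * lock_release(&m, ...);              references = 0, hlock removed
	 */
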
        hlock = curr->held_locks + depth;
        if (DEBUG_LOCKS_WARN_ON(!class))
                return 0;
-       hlock->class_idx = class - lock_classes + 1;
+       hlock->class_idx = class_idx;
        hlock->acquire_ip = ip;
        hlock->instance = lock;
        hlock->nest_lock = nest_lock;
@@ -2583,9 +2803,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->read = read;
        hlock->check = check;
        hlock->hardirqs_off = !!hardirqs_off;
+       hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
        hlock->waittime_stamp = 0;
-       hlock->holdtime_stamp = sched_clock();
+       hlock->holdtime_stamp = lockstat_clock();
 #endif
 
        if (check == 2 && !mark_irqflags(curr, hlock))
@@ -2637,6 +2858,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                debug_locks_off();
                printk("BUG: MAX_LOCK_DEPTH too low!\n");
                printk("turning off the locking correctness validator.\n");
+               dump_stack();
                return 0;
        }
 
@@ -2690,9 +2912,34 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
        return 1;
 }
 
+static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+{
+       if (hlock->instance == lock)
+               return 1;
+
+       if (hlock->references) {
+               struct lock_class *class = lock->class_cache;
+
+               if (!class)
+                       class = look_up_lock_class(lock, 0);
+
+               if (DEBUG_LOCKS_WARN_ON(!class))
+                       return 0;
+
+               if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
+                       return 0;
+
+               if (hlock->class_idx == class - lock_classes + 1)
+                       return 1;
+       }
+
+       return 0;
+}
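
With reference counting, hlock->instance keeps pointing at the first map
acquired, so a release through a different map of the same class must match by
class index instead. A hedged sketch (obj1, obj2 and parent are hypothetical
structures whose locks share a class):

	spin_lock(&parent->lock);
	spin_lock_nest_lock(&obj1->lock, &parent->lock); /* instance = &obj1->lock */
	spin_lock_nest_lock(&obj2->lock, &parent->lock); /* same class: references=2,
							  * instance still &obj1->lock */
	spin_unlock(&obj2->lock);	/* instance mismatch, but match_held_lock()
					 * accepts it via class_idx + references */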
+
 static int
-__lock_set_subclass(struct lockdep_map *lock,
-                   unsigned int subclass, unsigned long ip)
+__lock_set_class(struct lockdep_map *lock, const char *name,
+                struct lock_class_key *key, unsigned int subclass,
+                unsigned long ip)
 {
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
@@ -2712,13 +2959,14 @@ __lock_set_subclass(struct lockdep_map *lock,
                 */
                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
                        break;
-               if (hlock->instance == lock)
+               if (match_held_lock(hlock, lock))
                        goto found_it;
                prev_hlock = hlock;
        }
        return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
+       lockdep_init_map(lock, name, key, 0);
        class = register_lock_class(lock, subclass, 0);
        hlock->class_idx = class - lock_classes + 1;
 
@@ -2730,7 +2978,8 @@ found_it:
                if (!__lock_acquire(hlock->instance,
                        hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
-                               hlock->nest_lock, hlock->acquire_ip))
+                               hlock->nest_lock, hlock->acquire_ip,
+                               hlock->references))
                        return 0;
        }
 
@@ -2769,20 +3018,34 @@ lock_release_non_nested(struct task_struct *curr,
                 */
                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
                        break;
-               if (hlock->instance == lock)
+               if (match_held_lock(hlock, lock))
                        goto found_it;
                prev_hlock = hlock;
        }
        return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
-       lock_release_holdtime(hlock);
+       if (hlock->instance == lock)
+               lock_release_holdtime(hlock);
+
+       if (hlock->references) {
+               hlock->references--;
+               if (hlock->references) {
+                       /*
+                        * We had references and, after removing one,
+                        * still have some left; the current lock
+                        * stack is still valid. We're done!
+                        */
+                       return 1;
+               }
+       }
 
        /*
         * We have the right lock to unlock, 'hlock' points to it.
         * Now we remove it from the stack, and add back the other
         * entries (if any), recalculating the hash along the way:
         */
+
        curr->lockdep_depth = i;
        curr->curr_chain_key = hlock->prev_chain_key;
 
@@ -2791,7 +3054,8 @@ found_it:
                if (!__lock_acquire(hlock->instance,
                        hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
-                               hlock->nest_lock, hlock->acquire_ip))
+                               hlock->nest_lock, hlock->acquire_ip,
+                               hlock->references))
                        return 0;
        }
 
@@ -2821,7 +3085,7 @@ static int lock_release_nested(struct task_struct *curr,
        /*
         * Is the unlock non-nested:
         */
-       if (hlock->instance != lock)
+       if (hlock->instance != lock || hlock->references)
                return lock_release_non_nested(curr, lock, ip);
        curr->lockdep_depth--;
 
@@ -2866,6 +3130,21 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
        check_chain_key(curr);
 }
 
+static int __lock_is_held(struct lockdep_map *lock)
+{
+       struct task_struct *curr = current;
+       int i;
+
+       for (i = 0; i < curr->lockdep_depth; i++) {
+               struct held_lock *hlock = curr->held_locks + i;
+
+               if (match_held_lock(hlock, lock))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /*
  * Check whether we follow the irq-flags state precisely:
  */
@@ -2903,9 +3182,9 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
-void
-lock_set_subclass(struct lockdep_map *lock,
-                 unsigned int subclass, unsigned long ip)
+void lock_set_class(struct lockdep_map *lock, const char *name,
+                   struct lock_class_key *key, unsigned int subclass,
+                   unsigned long ip)
 {
        unsigned long flags;
 
@@ -2915,13 +3194,12 @@ lock_set_subclass(struct lockdep_map *lock,
        raw_local_irq_save(flags);
        current->lockdep_recursion = 1;
        check_flags(flags);
-       if (__lock_set_subclass(lock, subclass, ip))
+       if (__lock_set_class(lock, name, key, subclass, ip))
                check_chain_key(current);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL_GPL(lock_set_subclass);
+EXPORT_SYMBOL_GPL(lock_set_class);
 
 /*
  * We are not always called with irqs disabled - do that here,
@@ -2933,6 +3211,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
        unsigned long flags;
 
+       trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
        if (unlikely(current->lockdep_recursion))
                return;
 
@@ -2941,11 +3221,10 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
        current->lockdep_recursion = 1;
        __lock_acquire(lock, subclass, trylock, read, check,
-                      irqs_disabled_flags(flags), nest_lock, ip);
+                      irqs_disabled_flags(flags), nest_lock, ip, 0);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_acquire);
 
 void lock_release(struct lockdep_map *lock, int nested,
@@ -2953,6 +3232,8 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
        unsigned long flags;
 
+       trace_lock_release(lock, nested, ip);
+
        if (unlikely(current->lockdep_recursion))
                return;
 
@@ -2963,9 +3244,38 @@ void lock_release(struct lockdep_map *lock, int nested,
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL_GPL(lock_release);
 
+int lock_is_held(struct lockdep_map *lock)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       if (unlikely(current->lockdep_recursion))
+               return ret;
+
+       raw_local_irq_save(flags);
+       check_flags(flags);
+
+       current->lockdep_recursion = 1;
+       ret = __lock_is_held(lock);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(lock_is_held);
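
A hedged usage sketch, asserting lock context from a helper ('struct foo' and
update_counter() are hypothetical; dep_map is the lockdep_map that spinlock_t
embeds when CONFIG_DEBUG_LOCK_ALLOC is enabled):

	static void update_counter(struct foo *f)
	{
		WARN_ON_ONCE(!lock_is_held(&f->lock.dep_map));
		f->counter++;
	}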
+
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+       current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+       current->lockdep_reclaim_gfp = 0;
+}
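
Roughly how the page allocator is expected to bracket direct reclaim with these
two calls -- a sketch of the call pattern, not the exact mm/ code:

	current->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(gfp_mask);

	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

	lockdep_clear_current_reclaim_state();
	current->flags &= ~PF_MEMALLOC;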
+
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
@@ -3001,7 +3311,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
-       int i, point;
+       int i, contention_point, contending_point;
 
        depth = curr->lockdep_depth;
        if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -3015,7 +3325,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
                 */
                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
                        break;
-               if (hlock->instance == lock)
+               if (match_held_lock(hlock, lock))
                        goto found_it;
                prev_hlock = hlock;
        }
@@ -3023,27 +3333,33 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
        return;
 
 found_it:
-       hlock->waittime_stamp = sched_clock();
+       if (hlock->instance != lock)
+               return;
+
+       hlock->waittime_stamp = lockstat_clock();
 
-       point = lock_contention_point(hlock_class(hlock), ip);
+       contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
+       contending_point = lock_point(hlock_class(hlock)->contending_point,
+                                     lock->ip);
 
        stats = get_lock_stats(hlock_class(hlock));
-       if (point < ARRAY_SIZE(stats->contention_point))
-               stats->contention_point[point]++;
+       if (contention_point < LOCKSTAT_POINTS)
+               stats->contention_point[contention_point]++;
+       if (contending_point < LOCKSTAT_POINTS)
+               stats->contending_point[contending_point]++;
        if (lock->cpu != smp_processor_id())
                stats->bounces[bounce_contended + !!hlock->read]++;
        put_lock_stats(stats);
 }
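
The two point arrays answer different questions in /proc/lock_stat:
contention_point[] records where waiters blocked (this function's ip), while
contending_point[] records where the current owner had acquired the lock
(lock->ip, stamped in __lock_acquired() below). lock_point() deduplicates IPs
into at most LOCKSTAT_POINTS slots (4 in this sketch):

	/* points[] = {   0,   0, 0, 0 }   lock_point(points, 0xA) -> slot 0 (new)
	 * points[] = { 0xA,   0, 0, 0 }   lock_point(points, 0xB) -> slot 1 (new)
	 * points[] = { 0xA, 0xB, 0, 0 }   lock_point(points, 0xA) -> slot 0 (hit)
	 */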
 
 static void
-__lock_acquired(struct lockdep_map *lock)
+__lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
-       u64 now;
-       s64 waittime = 0;
+       u64 now, waittime = 0;
        int i, cpu;
 
        depth = curr->lockdep_depth;
@@ -3058,7 +3374,7 @@ __lock_acquired(struct lockdep_map *lock)
                 */
                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
                        break;
-               if (hlock->instance == lock)
+               if (match_held_lock(hlock, lock))
                        goto found_it;
                prev_hlock = hlock;
        }
@@ -3066,13 +3382,18 @@ __lock_acquired(struct lockdep_map *lock)
        return;
 
 found_it:
+       if (hlock->instance != lock)
+               return;
+
        cpu = smp_processor_id();
        if (hlock->waittime_stamp) {
-               now = sched_clock();
+               now = lockstat_clock();
                waittime = now - hlock->waittime_stamp;
                hlock->holdtime_stamp = now;
        }
 
+       trace_lock_acquired(lock, ip, waittime);
+
        stats = get_lock_stats(hlock_class(hlock));
        if (waittime) {
                if (hlock->read)
@@ -3085,12 +3406,15 @@ found_it:
        put_lock_stats(stats);
 
        lock->cpu = cpu;
+       lock->ip = ip;
 }
 
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
+       trace_lock_contended(lock, ip);
+
        if (unlikely(!lock_stat))
                return;
 
@@ -3106,7 +3430,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
-void lock_acquired(struct lockdep_map *lock)
+void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
        unsigned long flags;
 
@@ -3119,7 +3443,7 @@ void lock_acquired(struct lockdep_map *lock)
        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
-       __lock_acquired(lock);
+       __lock_acquired(lock, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
@@ -3278,10 +3602,10 @@ void __init lockdep_info(void)
 {
        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
 
-       printk("... MAX_LOCKDEP_SUBCLASSES:    %lu\n", MAX_LOCKDEP_SUBCLASSES);
+       printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
        printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
        printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
-       printk("... CLASSHASH_SIZE:           %lu\n", CLASSHASH_SIZE);
+       printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
        printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
        printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
        printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
@@ -3291,7 +3615,12 @@ void __init lockdep_info(void)
                sizeof(struct list_head) * CLASSHASH_SIZE +
                sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
                sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
-               sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
+               sizeof(struct list_head) * CHAINHASH_SIZE
+#ifdef CONFIG_PROVE_LOCKING
+               + sizeof(struct circular_queue)
+#endif
+               ) / 1024
+               );
 
        printk(" per task-struct memory footprint: %lu bytes\n",
                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
@@ -3443,7 +3772,6 @@ retry:
        if (unlock)
                read_unlock(&tasklist_lock);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
 /*
@@ -3464,7 +3792,6 @@ void debug_show_held_locks(struct task_struct *task)
 {
                __debug_show_held_locks(task);
 }
-
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
@@ -3482,3 +3809,21 @@ void lockdep_sys_exit(void)
                lockdep_print_held_locks(curr);
        }
 }
+
+void lockdep_rcu_dereference(const char *file, const int line)
+{
+       struct task_struct *curr = current;
+
+       if (!debug_locks_off())
+               return;
+       printk("\n===================================================\n");
+       printk(  "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
+       printk(  "---------------------------------------------------\n");
+       printk("%s:%d invoked rcu_dereference_check() without protection!\n",
+                       file, line);
+       printk("\nother info that might help us debug this:\n\n");
+       lockdep_print_held_locks(curr);
+       printk("\nstack backtrace:\n");
+       dump_stack();
+}
+EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
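
A hedged example of the kind of check that lands here: rcu_dereference_check()
warns through this function when its condition is false, so a reader that may
run under either RCU or a lock would write (gp, my_lock and struct foo are
hypothetical):

	struct foo *p = rcu_dereference_check(gp,
				rcu_read_lock_held() ||
				lockdep_is_held(&my_lock));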