diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 73cebd7..3956f51 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1381,7 +1381,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
                printk("\n");
 
                if (depth == 0 && (entry != root)) {
-                       printk("lockdep:%s bad BFS generated tree\n", __func__);
+                       printk("lockdep:%s bad path found in chain graph\n", __func__);
                        break;
                }
 
@@ -1395,15 +1395,15 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 static void
 print_irq_lock_scenario(struct lock_list *safe_entry,
                        struct lock_list *unsafe_entry,
-                       struct held_lock *prev,
-                       struct held_lock *next)
+                       struct lock_class *prev_class,
+                       struct lock_class *next_class)
 {
        struct lock_class *safe_class = safe_entry->class;
        struct lock_class *unsafe_class = unsafe_entry->class;
-       struct lock_class *middle_class = hlock_class(prev);
+       struct lock_class *middle_class = prev_class;
 
        if (middle_class == safe_class)
-               middle_class = hlock_class(next);
+               middle_class = next_class;
 
        /*
         * A direct locking problem where unsafe_class lock is taken
@@ -1499,7 +1499,8 @@ print_bad_irq_dependency(struct task_struct *curr,
        print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
        printk("\nother info that might help us debug this:\n\n");
-       print_irq_lock_scenario(backwards_entry, forwards_entry, prev, next);
+       print_irq_lock_scenario(backwards_entry, forwards_entry,
+                               hlock_class(prev), hlock_class(next));
 
        lockdep_print_held_locks(curr);
 
@@ -1664,6 +1665,26 @@ static inline void inc_chains(void)
 
 #endif
 
+static void
+print_deadlock_scenario(struct held_lock *nxt,
+                            struct held_lock *prv)
+{
+       struct lock_class *next = hlock_class(nxt);
+       struct lock_class *prev = hlock_class(prv);
+
+       printk(" Possible unsafe locking scenario:\n\n");
+       printk("       CPU0\n");
+       printk("       ----\n");
+       printk("  lock(");
+       __print_lock_name(prev);
+       printk(");\n");
+       printk("  lock(");
+       __print_lock_name(next);
+       printk(");\n");
+       printk("\n *** DEADLOCK ***\n\n");
+       printk(" May be due to missing lock nesting notation\n\n");
+}
+
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
                   struct held_lock *next)
@@ -1682,6 +1703,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
        print_lock(prev);
 
        printk("\nother info that might help us debug this:\n");
+       print_deadlock_scenario(next, prev);
        lockdep_print_held_locks(curr);
 
        printk("\nstack backtrace:\n");
@@ -1951,7 +1973,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
        struct list_head *hash_head = chainhashentry(chain_key);
        struct lock_chain *chain;
        struct held_lock *hlock_curr, *hlock_next;
-       int i, j, n, cn;
+       int i, j;
 
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
@@ -2011,15 +2033,9 @@ cache_hit:
        }
        i++;
        chain->depth = curr->lockdep_depth + 1 - i;
-       cn = nr_chain_hlocks;
-       while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
-               n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
-               if (n == cn)
-                       break;
-               cn = n;
-       }
-       if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
-               chain->base = cn;
+       if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+               chain->base = nr_chain_hlocks;
+               nr_chain_hlocks += chain->depth;
                for (j = 0; j < chain->depth - 1; j++, i++) {
                        int lock_id = curr->held_locks[i].class_idx - 1;
                        chain_hlocks[chain->base + j] = lock_id;
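
Dropping the cmpxchg() retry loop relies on this allocation already running with lockdep's graph lock held, so a plain read-and-add on nr_chain_hlocks cannot race. A userspace sketch of the same reservation pattern, with illustrative names:

#include <pthread.h>

#define MAX_SLOTS	1024

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of the graph lock */
static int nr_used;						/* plays the role of nr_chain_hlocks */

/*
 * Reserve n consecutive slots; returns the base index, or -1 when the pool
 * is exhausted.  Because every writer holds pool_lock, the plain add cannot
 * race and no cmpxchg() retry loop is needed.
 */
static int reserve_slots(int n)
{
	int base = -1;

	pthread_mutex_lock(&pool_lock);
	if (nr_used + n <= MAX_SLOTS) {
		base = nr_used;
		nr_used += n;
	}
	pthread_mutex_unlock(&pool_lock);
	return base;
}
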
@@ -2136,6 +2152,24 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
+static void
+print_usage_bug_scenario(struct held_lock *lock)
+{
+       struct lock_class *class = hlock_class(lock);
+
+       printk(" Possible unsafe locking scenario:\n\n");
+       printk("       CPU0\n");
+       printk("       ----\n");
+       printk("  lock(");
+       __print_lock_name(class);
+       printk(");\n");
+       printk("  <Interrupt>\n");
+       printk("    lock(");
+       __print_lock_name(class);
+       printk(");\n");
+       printk("\n *** DEADLOCK ***\n\n");
+}
+
 static int
 print_usage_bug(struct task_struct *curr, struct held_lock *this,
                enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
@@ -2164,6 +2198,8 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 
        print_irqtrace_events(curr);
        printk("\nother info that might help us debug this:\n");
+       print_usage_bug_scenario(this);
+
        lockdep_print_held_locks(curr);
 
        printk("\nstack backtrace:\n");
@@ -2198,6 +2234,10 @@ print_irq_inversion_bug(struct task_struct *curr,
                        struct held_lock *this, int forwards,
                        const char *irqclass)
 {
+       struct lock_list *entry = other;
+       struct lock_list *middle = NULL;
+       int depth;
+
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;
 
@@ -2216,6 +2256,25 @@ print_irq_inversion_bug(struct task_struct *curr,
        printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
        printk("\nother info that might help us debug this:\n");
+
+       /* Find a middle lock (if one exists) */
+       depth = get_lock_depth(other);
+       do {
+               if (depth == 0 && (entry != root)) {
+                       printk("lockdep:%s bad path found in chain graph\n", __func__);
+                       break;
+               }
+               middle = entry;
+               entry = get_lock_parent(entry);
+               depth--;
+       } while (entry && entry != root && (depth >= 0));
+       if (forwards)
+               print_irq_lock_scenario(root, other,
+                       middle ? middle->class : root->class, other->class);
+       else
+               print_irq_lock_scenario(other, root,
+                       middle ? middle->class : other->class, root->class);
+
        lockdep_print_held_locks(curr);
 
        printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
@@ -2409,6 +2468,9 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 
                BUG_ON(usage_bit >= LOCK_USAGE_STATES);
 
+               if (hlock_class(hlock)->key == &__lockdep_no_validate__)
+                       continue;
+
                if (!mark_lock(curr, hlock, usage_bit))
                        return 0;
        }
@@ -2419,15 +2481,10 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(unsigned long ip)
 {
        struct task_struct *curr = current;
 
-       time_hardirqs_on(CALLER_ADDR0, ip);
-
-       if (unlikely(!debug_locks || current->lockdep_recursion))
-               return;
-
        if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
                return;
 
@@ -2443,8 +2500,6 @@ void trace_hardirqs_on_caller(unsigned long ip)
        /* we'll do an OFF -> ON transition: */
        curr->hardirqs_enabled = 1;
 
-       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-               return;
        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                return;
        /*
@@ -2466,6 +2521,21 @@ void trace_hardirqs_on_caller(unsigned long ip)
        curr->hardirq_enable_event = ++curr->irq_events;
        debug_atomic_inc(hardirqs_on_events);
 }
+
+void trace_hardirqs_on_caller(unsigned long ip)
+{
+       time_hardirqs_on(CALLER_ADDR0, ip);
+
+       if (unlikely(!debug_locks || current->lockdep_recursion))
+               return;
+
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+               return;
+
+       current->lockdep_recursion = 1;
+       __trace_hardirqs_on_caller(ip);
+       current->lockdep_recursion = 0;
+}
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void trace_hardirqs_on(void)
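
Splitting trace_hardirqs_on_caller() leaves the cheap early-exit checks in the exported wrapper and brackets the real work with current->lockdep_recursion, so any lockdep activity triggered from inside the tracer is ignored instead of recursing; the hunks below add the same guard to the softirq paths. The guard pattern in isolation, as a runnable userspace sketch with made-up names:

#include <stdio.h>

static __thread int lockdep_recursion;	/* stand-in for current->lockdep_recursion */

static void __trace_irqs_on(void)
{
	/*
	 * Real work; anything here that calls trace_irqs_on() again sees
	 * lockdep_recursion set and returns immediately.
	 */
	printf("hardirqs on\n");
}

static void trace_irqs_on(void)
{
	if (lockdep_recursion)
		return;			/* already inside the tracer */
	lockdep_recursion = 1;
	__trace_irqs_on();
	lockdep_recursion = 0;
}

int main(void)
{
	trace_irqs_on();
	return 0;
}
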
@@ -2515,7 +2585,7 @@ void trace_softirqs_on(unsigned long ip)
 {
        struct task_struct *curr = current;
 
-       if (unlikely(!debug_locks))
+       if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2526,6 +2596,7 @@ void trace_softirqs_on(unsigned long ip)
                return;
        }
 
+       current->lockdep_recursion = 1;
        /*
         * We'll do an OFF -> ON transition:
         */
@@ -2540,6 +2611,7 @@ void trace_softirqs_on(unsigned long ip)
         */
        if (curr->hardirqs_enabled)
                mark_held_locks(curr, SOFTIRQ);
+       current->lockdep_recursion = 0;
 }
 
 /*
@@ -2549,7 +2621,7 @@ void trace_softirqs_off(unsigned long ip)
 {
        struct task_struct *curr = current;
 
-       if (unlikely(!debug_locks))
+       if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -3367,7 +3439,7 @@ int lock_is_held(struct lockdep_map *lock)
        int ret = 0;
 
        if (unlikely(current->lockdep_recursion))
-               return ret;
+               return 1; /* avoid false negative lockdep_assert_held() */
 
        raw_local_irq_save(flags);
        check_flags(flags);
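
Having lock_is_held() answer 1 while lockdep_recursion is set keeps assertions built on it from firing spuriously once lockdep has stepped aside for the current context. An approximate sketch of the caller side, from memory and not part of this patch:

/*
 * The assertion only warns when lock_is_held() says "not held", so
 * returning 1 while lockdep is recursing avoids a false-negative splat.
 */
#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_assert_held(lock)	WARN_ON(debug_locks && !lockdep_is_held(lock))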