[PATCH] Minor cleanup to lockdep.c
/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e., if at any time in the past two locks were taken in a different
 * order - even if it happened in another task, and even if those were
 * different locks (but of the same class as this lock) - this code will
 * detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
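
/*
 * For illustration (hypothetical locks A and B), the classic lock
 * inversion this validator catches is the AB-BA scenario:
 *
 *	CPU0:			CPU1:
 *	spin_lock(&A);		spin_lock(&B);
 *	spin_lock(&B);		spin_lock(&A);
 *
 * The two orderings need not actually race: per the note above, seeing
 * them at any two points in time - even in different tasks - is enough
 * for the dependency graph to contain the A -> B -> A cycle.
 */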
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

/*
 * hash_lock: protects the lockdep hashes and class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code.
 */
static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * Allocate a lockdep entry. (Assumes hash_lock is held; on failure
 * hash_lock is dropped and NULL is returned.)
 */
static struct lock_list *alloc_list_entry(void)
{
        if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
                __raw_spin_unlock(&hash_lock);
                debug_locks_off();
                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
                printk("turning off the locking correctness validator.\n");
                return NULL;
        }
        return list_entries + nr_list_entries++;
}

/*
 * All data structures here are protected by the global hash_lock.
 *
 * Mutex key structs only get allocated once, during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
#define CLASSHASH_MASK          (CLASSHASH_SIZE - 1)
#define __classhashfn(key)      ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
#define classhashentry(key)     (classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

unsigned long nr_lock_chains;
static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS - 1)
#define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
#define CHAINHASH_MASK          (CHAINHASH_SIZE - 1)
#define __chainhashfn(chain) \
                (((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
#define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
        (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \
        ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \
        (key2))
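
/*
 * Illustrative sketch: the chain key for a stack of held locks is built
 * up one class-id at a time, starting from 0. For a task holding classes
 * with ids id1 and id2 (in that order), conceptually:
 *
 *	u64 chain_key = 0;
 *	chain_key = iterate_chain_key(chain_key, id1);
 *	chain_key = iterate_chain_key(chain_key, id2);
 *
 * check_chain_key() below revalidates the incrementally maintained key
 * by recomputing exactly this from scratch.
 */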

void lockdep_off(void)
{
        current->lockdep_recursion++;
}

EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
        current->lockdep_recursion--;
}

EXPORT_SYMBOL(lockdep_on);

int lockdep_internal(void)
{
        return current->lockdep_recursion != 0;
}

EXPORT_SYMBOL(lockdep_internal);
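
/*
 * Usage sketch (illustrative): code whose locking must be ignored by
 * the validator - e.g. because it could recurse into lockdep itself -
 * brackets the region with the counter-based calls above:
 *
 *	lockdep_off();
 *	... locking invisible to lockdep ...
 *	lockdep_on();
 *
 * Since lockdep_recursion is a counter, such regions nest safely.
 */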

/*
 * Debugging switches:
 */

#define VERBOSE                 0
#ifdef VERBOSE
# define VERY_VERBOSE           0
#endif

#if VERBOSE
# define HARDIRQ_VERBOSE        1
# define SOFTIRQ_VERBOSE        1
#else
# define HARDIRQ_VERBOSE        0
# define SOFTIRQ_VERBOSE        0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
        /* Example */
        if (class->name_version == 1 &&
                        !strcmp(class->name, "lockname"))
                return 1;
        if (class->name_version == 1 &&
                        !strcmp(class->name, "&struct->lockfield"))
                return 1;
#endif
        /*
         * Allow everything else. Returning 0 here would filter
         * everything else out.
         */
        return 1;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
        return class_filter(class);
#endif
        return 0;
}

#ifdef CONFIG_TRACE_IRQFLAGS

static int hardirq_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
        return class_filter(class);
#endif
        return 0;
}

static int softirq_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
        return class_filter(class);
#endif
        return 0;
}

#endif

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
        trace->nr_entries = 0;
        trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
        trace->entries = stack_trace + nr_stack_trace_entries;

        save_stack_trace(trace, NULL, 0, 3);

        trace->max_entries = trace->nr_entries;

        nr_stack_trace_entries += trace->nr_entries;
        if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
                return 0;

        if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
                __raw_spin_unlock(&hash_lock);
                if (debug_locks_off()) {
                        printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
                        printk("turning off the locking correctness validator.\n");
                        dump_stack();
                }
                return 0;
        }

        return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
unsigned int max_recursion_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Even early_printk()
 * might not work at that point, so we mark any initialization
 * errors and printk about them later on, in lockdep_info().
 */
static int lockdep_init_error;

/*
 * Various lockdep statistics:
 */
atomic_t chain_lookup_hits;
atomic_t chain_lookup_misses;
atomic_t hardirqs_on_events;
atomic_t hardirqs_off_events;
atomic_t redundant_hardirqs_on;
atomic_t redundant_hardirqs_off;
atomic_t softirqs_on_events;
atomic_t softirqs_off_events;
atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
atomic_t nr_cyclic_check_recursions;
atomic_t nr_find_usage_forwards_checks;
atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
atomic_t nr_find_usage_backwards_recursions;
# define debug_atomic_inc(ptr)          atomic_inc(ptr)
# define debug_atomic_dec(ptr)          atomic_dec(ptr)
# define debug_atomic_read(ptr)         atomic_read(ptr)
#else
# define debug_atomic_inc(ptr)          do { } while (0)
# define debug_atomic_dec(ptr)          do { } while (0)
# define debug_atomic_read(ptr)         0
#endif

/*
 * Locking printouts:
 */

static const char *usage_str[] =
{
        [LOCK_USED] =                   "initial-use ",
        [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
        [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
        [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
        [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
        [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
        [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
        [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
        [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
};

const char *__get_key_name(struct lockdep_subclass_key *key, char *str)
{
        unsigned long offs, size;
        char *modname;

        return kallsyms_lookup((unsigned long)key, &size, &offs, &modname, str);
}

void
get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
{
        *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';

        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                *c1 = '+';
        else if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
                *c1 = '-';

        if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                *c2 = '+';
        else if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
                *c2 = '-';

        if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                *c3 = '-';
        if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
                *c3 = '+';
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                        *c3 = '?';
        }

        if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
                *c4 = '-';
        if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
                *c4 = '+';
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
                        *c4 = '?';
        }
}
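
/*
 * Informal legend for the four characters above (summarized from the
 * code for convenience):
 *
 *	c1/c2 - hardirq/softirq write state:
 *		'+' ever used in hardirq/softirq context,
 *		'-' ever acquired with hardirqs/softirqs enabled,
 *		'.' neither
 *	c3/c4 - the same for the read state; '?' means both
 *		used-in-irq and irqs-enabled were observed.
 */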

static void print_lock_name(struct lock_class *class)
{
        char str[128], c1, c2, c3, c4;
        const char *name;

        get_usage_chars(class, &c1, &c2, &c3, &c4);

        name = class->name;
        if (!name) {
                name = __get_key_name(class->key, str);
                printk(" (%s", name);
        } else {
                printk(" (%s", name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
        printk("){%c%c%c%c}", c1, c2, c3, c4);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
        const char *name;
        char str[128];

        name = lock->name;
        if (!name)
                name = __get_key_name(lock->key->subkeys, str);

        printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
        print_lock_name(hlock->class);
        printk(", at: ");
        print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
        int i, depth = curr->lockdep_depth;

        if (!depth) {
                printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
                return;
        }
        printk("%d lock%s held by %s/%d:\n",
                depth, depth > 1 ? "s" : "", curr->comm, curr->pid);

        for (i = 0; i < depth; i++) {
                printk(" #%d: ", i);
                print_lock(curr->held_locks + i);
        }
}

static void print_lock_class_header(struct lock_class *class, int depth)
{
        int bit;

        printk("%*s->", depth, "");
        print_lock_name(class);
        printk(" ops: %lu", class->ops);
        printk(" {\n");

        for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
                if (class->usage_mask & (1 << bit)) {
                        int len = depth;

                        len += printk("%*s   %s", depth, "", usage_str[bit]);
                        len += printk(" at:\n");
                        print_stack_trace(class->usage_traces + bit, len);
                }
        }
        printk("%*s }\n", depth, "");

        printk("%*s ... key      at: ", depth, "");
        print_ip_sym((unsigned long)class->key);
}

/*
 * printk all lock dependencies starting at <entry>:
 */
static void print_lock_dependencies(struct lock_class *class, int depth)
{
        struct lock_list *entry;

        if (DEBUG_LOCKS_WARN_ON(depth >= 20))
                return;

        print_lock_class_header(class, depth);

        list_for_each_entry(entry, &class->locks_after, entry) {
                DEBUG_LOCKS_WARN_ON(!entry->class);
                print_lock_dependencies(entry->class, depth + 1);

                printk("%*s ... acquired at:\n", depth, "");
                print_stack_trace(&entry->trace, 2);
                printk("\n");
        }
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
                            struct list_head *head, unsigned long ip)
{
        struct lock_list *entry;
        /*
         * Lock not present yet - get a new dependency struct and
         * add it to the list:
         */
        entry = alloc_list_entry();
        if (!entry)
                return 0;

        entry->class = this;
        save_trace(&entry->trace);

        /*
         * Since we never remove from the dependency list, the list can
         * be walked locklessly by other CPUs; it's only the allocation
         * that must be protected by the spinlock. But this also means
         * we must make new entries visible only after the writes to the
         * entry itself become visible - hence the RCU op:
         */
        list_add_tail_rcu(&entry->entry, head);

        return 1;
}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 *
 * (to keep the stackframe of the recursive functions small we
 *  use these global variables, and we also mark various helper
 *  functions as noinline.)
 */
static struct held_lock *check_source, *check_target;

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, unsigned int depth)
{
        if (debug_locks_silent)
                return 0;
        printk("\n-> #%u", depth);
        print_lock_name(target->class);
        printk(":\n");
        print_stack_trace(&target->trace, 6);

        return 0;
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth)
{
        struct task_struct *curr = current;

        __raw_spin_unlock(&hash_lock);
        debug_locks_off();
        if (debug_locks_silent)
                return 0;

        printk("\n=======================================================\n");
        printk(  "[ INFO: possible circular locking dependency detected ]\n");
        printk(  "-------------------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, curr->pid);
        print_lock(check_source);
        printk("\nbut task is already holding lock:\n");
        print_lock(check_target);
        printk("\nwhich lock already depends on the new lock.\n\n");
        printk("\nthe existing dependency chain (in reverse order) is:\n");

        print_circular_bug_entry(entry, depth);

        return 0;
}

static noinline int print_circular_bug_tail(void)
{
        struct task_struct *curr = current;
        struct lock_list this;

        if (debug_locks_silent)
                return 0;

        this.class = check_source->class;
        save_trace(&this.trace);
        print_circular_bug_entry(&this, 0);

        printk("\nother info that might help us debug this:\n\n");
        lockdep_print_held_locks(curr);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

static noinline int print_infinite_recursion_bug(void)
{
        __raw_spin_unlock(&hash_lock);
        DEBUG_LOCKS_WARN_ON(1);

        return 0;
}

/*
 * Prove that the dependency graph starting at <entry> cannot
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_class *source, unsigned int depth)
{
        struct lock_list *entry;

        debug_atomic_inc(&nr_cyclic_check_recursions);
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
        if (depth >= 20)
                return print_infinite_recursion_bug();
        /*
         * Check this lock's dependency list:
         */
        list_for_each_entry(entry, &source->locks_after, entry) {
                if (entry->class == check_target->class)
                        return print_circular_bug_header(entry, depth+1);
                debug_atomic_inc(&nr_cyclic_checks);
                if (!check_noncircular(entry->class, depth+1))
                        return print_circular_bug_entry(entry, depth+1);
        }
        return 1;
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
        return class_filter(class);
#endif
        return 0;
}

#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */
static enum lock_usage_bit find_usage_bit;
static struct lock_class *forwards_match, *backwards_match;

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at <source> that matches <find_usage_bit>.
 *
 * Return 2 if such a node exists in the subgraph, and put that node
 * into <forwards_match>.
 *
 * Return 1 otherwise and keep <forwards_match> unchanged.
 * Return 0 on error.
 */
static noinline int
find_usage_forwards(struct lock_class *source, unsigned int depth)
{
        struct lock_list *entry;
        int ret;

        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
        if (depth >= 20)
                return print_infinite_recursion_bug();

        debug_atomic_inc(&nr_find_usage_forwards_checks);
        if (source->usage_mask & (1 << find_usage_bit)) {
                forwards_match = source;
                return 2;
        }

        /*
         * Check this lock's dependency list:
         */
        list_for_each_entry(entry, &source->locks_after, entry) {
                debug_atomic_inc(&nr_find_usage_forwards_recursions);
                ret = find_usage_forwards(entry->class, depth+1);
                if (ret == 2 || ret == 0)
                        return ret;
        }
        return 1;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at <source> that matches <find_usage_bit>.
 *
 * Return 2 if such a node exists in the subgraph, and put that node
 * into <backwards_match>.
 *
 * Return 1 otherwise and keep <backwards_match> unchanged.
 * Return 0 on error.
 */
static noinline int
find_usage_backwards(struct lock_class *source, unsigned int depth)
{
        struct lock_list *entry;
        int ret;

        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
        if (depth >= 20)
                return print_infinite_recursion_bug();

        debug_atomic_inc(&nr_find_usage_backwards_checks);
        if (source->usage_mask & (1 << find_usage_bit)) {
                backwards_match = source;
                return 2;
        }

        /*
         * Check this lock's dependency list:
         */
        list_for_each_entry(entry, &source->locks_before, entry) {
                debug_atomic_inc(&nr_find_usage_backwards_recursions);
                ret = find_usage_backwards(entry->class, depth+1);
                if (ret == 2 || ret == 0)
                        return ret;
        }
        return 1;
}

static int
print_bad_irq_dependency(struct task_struct *curr,
                         struct held_lock *prev,
                         struct held_lock *next,
                         enum lock_usage_bit bit1,
                         enum lock_usage_bit bit2,
                         const char *irqclass)
{
        __raw_spin_unlock(&hash_lock);
        debug_locks_off();
        if (debug_locks_silent)
                return 0;

        printk("\n======================================================\n");
        printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
                irqclass, irqclass);
        printk(  "------------------------------------------------------\n");
        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
                curr->comm, curr->pid,
                curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
                curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
                curr->hardirqs_enabled,
                curr->softirqs_enabled);
        print_lock(next);

        printk("\nand this task is already holding:\n");
        print_lock(prev);
        printk("which would create a new lock dependency:\n");
        print_lock_name(prev->class);
        printk(" ->");
        print_lock_name(next->class);
        printk("\n");

        printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
                irqclass);
        print_lock_name(backwards_match);
        printk("\n... which became %s-irq-safe at:\n", irqclass);

        print_stack_trace(backwards_match->usage_traces + bit1, 1);

        printk("\nto a %s-irq-unsafe lock:\n", irqclass);
        print_lock_name(forwards_match);
        printk("\n... which became %s-irq-unsafe at:\n", irqclass);
        printk("...");

        print_stack_trace(forwards_match->usage_traces + bit2, 1);

        printk("\nother info that might help us debug this:\n\n");
        lockdep_print_held_locks(curr);

        printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
        print_lock_dependencies(backwards_match, 0);

        printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
        print_lock_dependencies(forwards_match, 0);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

static int
check_usage(struct task_struct *curr, struct held_lock *prev,
            struct held_lock *next, enum lock_usage_bit bit_backwards,
            enum lock_usage_bit bit_forwards, const char *irqclass)
{
        int ret;

        find_usage_bit = bit_backwards;
        /* fills in <backwards_match> */
        ret = find_usage_backwards(prev->class, 0);
        if (!ret || ret == 1)
                return ret;

        find_usage_bit = bit_forwards;
        ret = find_usage_forwards(next->class, 0);
        if (!ret || ret == 1)
                return ret;
        /* ret == 2 */
        return print_bad_irq_dependency(curr, prev, next,
                        bit_backwards, bit_forwards, irqclass);
}

#endif

static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
                   struct held_lock *next)
{
        debug_locks_off();
        __raw_spin_unlock(&hash_lock);
        if (debug_locks_silent)
                return 0;

        printk("\n=============================================\n");
        printk(  "[ INFO: possible recursive locking detected ]\n");
        printk(  "---------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, curr->pid);
        print_lock(next);
        printk("\nbut task is already holding lock:\n");
        print_lock(prev);

        printk("\nother info that might help us debug this:\n");
        lockdep_print_held_locks(curr);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

/*
 * Check whether we are holding a lock of this class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
               struct lockdep_map *next_instance, int read)
{
        struct held_lock *prev;
        int i;

        for (i = 0; i < curr->lockdep_depth; i++) {
                prev = curr->held_locks + i;
                if (prev->class != next->class)
                        continue;
                /*
                 * Allow read-after-read recursion of the same
                 * lock class (i.e. read_lock(lock)+read_lock(lock)):
                 */
                if ((read == 2) && prev->read)
                        return 2;
                return print_deadlock_bug(curr, prev, next);
        }
        return 1;
}

/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * Any of these scenarios could lead to a deadlock (see the sketch
 * below for the hardirq case).
 *
 * Then, if all the validations pass, we add the forwards and backwards
 * dependency.
 */
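/*
 * For illustration (hypothetical locks A and B), consider the hardirq
 * rule above: let A be hardirq-safe and B hardirq-unsafe. If the
 * A -> B dependency were allowed, then on a single CPU:
 *
 *	spin_lock(&B);			// process context, hardirqs on
 *	<hardirq>
 *		spin_lock(&A);		// fine, A is hardirq-safe
 *		spin_lock(&B);		// A -> B path: spins on B, which
 *					// the interrupted task holds and
 *					// can never release
 *
 * - a self-deadlock. The softirq rule is the same scenario with
 * softirqs.
 */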
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
               struct held_lock *next)
{
        struct lock_list *entry;
        int ret;

        /*
         * Prove that the new <prev> -> <next> dependency would not
         * create a circular dependency in the graph. (We do this by
         * forward-recursing into the graph starting at <next>, and
         * checking whether we can reach <prev>.)
         *
         * We are using global variables to control the recursion, to
         * keep the stackframe size of the recursive functions low:
         */
        check_source = next;
        check_target = prev;
        if (!check_noncircular(next->class, 0))
                return print_circular_bug_tail();

#ifdef CONFIG_TRACE_IRQFLAGS
        /*
         * Prove that the new dependency does not connect a hardirq-safe
         * lock with a hardirq-unsafe lock - to achieve this we search
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
        if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
                                        LOCK_ENABLED_HARDIRQS, "hard"))
                return 0;

        /*
         * Prove that the new dependency does not connect a hardirq-safe-read
         * lock with a hardirq-unsafe lock - to achieve this we search
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
        if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
                                        LOCK_ENABLED_HARDIRQS, "hard-read"))
                return 0;

        /*
         * Prove that the new dependency does not connect a softirq-safe
         * lock with a softirq-unsafe lock - to achieve this we search
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
        if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
                                        LOCK_ENABLED_SOFTIRQS, "soft"))
                return 0;

        /*
         * Prove that the new dependency does not connect a softirq-safe-read
         * lock with a softirq-unsafe lock - to achieve this we search
         * the backwards-subgraph starting at <prev>, and the
         * forwards-subgraph starting at <next>:
         */
        if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
                                        LOCK_ENABLED_SOFTIRQS, "soft-read"))
                return 0;
#endif
        /*
         * For recursive read-locks we do all the dependency checks,
         * but we don't store read-triggered dependencies (only
         * write-triggered dependencies). This ensures that only the
         * write-side dependencies matter, and that if for example a
         * write-lock never takes any other locks, then the reads are
         * equivalent to a NOP.
         */
        if (next->read == 2 || prev->read == 2)
                return 1;
        /*
         * Is the <prev> -> <next> dependency already present?
         *
         * (this may occur even though this is a new chain: consider
         *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
         *  chains - the second one will be new, but L1 already has
         *  L2 added to its dependency list, due to the first chain.)
         */
        list_for_each_entry(entry, &prev->class->locks_after, entry) {
                if (entry->class == next->class)
                        return 2;
        }

        /*
         * Ok, all validations passed: add the new lock
         * to the previous lock's dependency list:
         */
        ret = add_lock_to_list(prev->class, next->class,
                               &prev->class->locks_after, next->acquire_ip);
        if (!ret)
                return 0;
        /*
         * Return value of 2 signals 'dependency already added';
         * in that case we don't have to add the backlink either.
         */
        if (ret == 2)
                return 2;
        ret = add_lock_to_list(next->class, prev->class,
                               &next->class->locks_before, next->acquire_ip);

        /*
         * Debugging printouts:
         */
        if (verbose(prev->class) || verbose(next->class)) {
                __raw_spin_unlock(&hash_lock);
                printk("\n new dependency: ");
                print_lock_name(prev->class);
                printk(" => ");
                print_lock_name(next->class);
                printk("\n");
                dump_stack();
                __raw_spin_lock(&hash_lock);
        }
        return 1;
}

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
        int depth = curr->lockdep_depth;
        struct held_lock *hlock;

        /*
         * Debugging checks.
         *
         * Depth must not be zero for a non-head lock:
         */
        if (!depth)
                goto out_bug;
        /*
         * At least two relevant locks must exist for this
         * to be a head:
         */
        if (curr->held_locks[depth].irq_context !=
                        curr->held_locks[depth-1].irq_context)
                goto out_bug;

        for (;;) {
                hlock = curr->held_locks + depth-1;
                /*
                 * Only non-recursive-read entries get new dependencies
                 * added:
                 */
                if (hlock->read != 2) {
                        check_prev_add(curr, hlock, next);
                        /*
                         * Stop after the first non-trylock entry,
                         * as non-trylock entries have added their
                         * own direct dependencies already, so this
                         * lock is connected to them indirectly:
                         */
                        if (!hlock->trylock)
                                break;
                }
                depth--;
                /*
                 * End of lock-stack?
                 */
                if (!depth)
                        break;
                /*
                 * Stop the search if we cross into another context:
                 */
                if (curr->held_locks[depth].irq_context !=
                                curr->held_locks[depth-1].irq_context)
                        break;
        }
        return 1;
out_bug:
        __raw_spin_unlock(&hash_lock);
        DEBUG_LOCKS_WARN_ON(1);

        return 0;
}

/*
 * Is this the address of a static object?
 */
static int static_obj(void *obj)
{
        unsigned long start = (unsigned long) &_stext,
                      end   = (unsigned long) &_end,
                      addr  = (unsigned long) obj;
#ifdef CONFIG_SMP
        int i;
#endif

        /*
         * static variable?
         */
        if ((addr >= start) && (addr < end))
                return 1;

#ifdef CONFIG_SMP
        /*
         * percpu var?
         */
        for_each_possible_cpu(i) {
                start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
                end   = (unsigned long) &__per_cpu_end   + per_cpu_offset(i);

                if ((addr >= start) && (addr < end))
                        return 1;
        }
#endif

        /*
         * module var?
         */
        return is_module_address(addr);
}
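
/*
 * For illustration (hypothetical): a file-scope
 *
 *	static DEFINE_SPINLOCK(global_lock);
 *
 * lies within [_stext, _end), so static_obj(&global_lock) is true and
 * the lock object itself can serve as its class key. A lock embedded in
 * a kmalloc()ed structure is not a static object - such locks must get
 * their key from a static lock_class_key (see register_lock_class()).
 */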

/*
 * To make lock name printouts unique, we calculate a per-name
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
        struct lock_class *class;
        int count = 0;

        if (!new_class->name)
                return 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {
                if (new_class->key - new_class->subclass == class->key)
                        return class->name_version;
                if (class->name && !strcmp(class->name, new_class->name))
                        count = max(count, class->name_version);
        }

        return count + 1;
}
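
/*
 * Illustration (hypothetical name): if two distinct classes both carry
 * the name "&inode->i_mutex", the first gets name_version 1 and the
 * second gets name_version 2, which print_lock_name() renders as
 * "&inode->i_mutex#2" to keep the printouts unambiguous.
 */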

extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so the actual hash lookup should happen only once per lock
 * object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
        struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
        /*
         * If the architecture calls into lockdep before initializing
         * the hashes then we'll warn about it later. (we cannot printk
         * right now)
         */
        if (unlikely(!lockdep_initialized)) {
                lockdep_init();
                lockdep_init_error = 1;
        }
#endif

        /*
         * Static locks do not have their class-keys yet - for them the key
         * is the lock object itself:
         */
        if (unlikely(!lock->key))
                lock->key = (void *)lock;

        /*
         * NOTE: the class-key must be unique. For dynamic locks, a static
         * lock_class_key variable is passed in through the mutex_init()
         * (or spin_lock_init()) call - which acts as the key. For static
         * locks we use the lock object itself as the key.
         */
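        /*
         * (For illustration - hypothetical and simplified, the real
         *  definition lives in the locking headers, not in this file -
         *  such an init macro typically supplies the key roughly like:
         *
         *	#define spin_lock_init(lock)				\
         *	do {							\
         *		static struct lock_class_key __key;		\
         *								\
         *		__spin_lock_init((lock), #lock, &__key);	\
         *	} while (0)
         *
         *  so each init call-site contributes one persistent key.)
         */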
        if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
                __error_too_big_MAX_LOCKDEP_SUBCLASSES();

        key = lock->key->subkeys + subclass;

        hash_head = classhashentry(key);

        /*
         * We can walk the hash lock-free, because the hash only
         * grows, and we are careful when adding entries to the end:
         */
        list_for_each_entry(class, hash_head, hash_entry)
                if (class->key == key)
                        goto out_set;

        /*
         * Debug-check: all keys must be persistent!
         */
        if (!static_obj(lock->key)) {
                debug_locks_off();
                printk("INFO: trying to register non-static key.\n");
                printk("the code is fine but needs lockdep annotation.\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();

                return NULL;
        }

        __raw_spin_lock(&hash_lock);
        /*
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
        list_for_each_entry(class, hash_head, hash_entry)
                if (class->key == key)
                        goto out_unlock_set;
        /*
         * Allocate a new key from the static array, and add it to
         * the hash:
         */
        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                __raw_spin_unlock(&hash_lock);
                debug_locks_off();
                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
                printk("turning off the locking correctness validator.\n");
                return NULL;
        }
        class = lock_classes + nr_lock_classes++;
        debug_atomic_inc(&nr_unused_locks);
        class->key = key;
        class->name = lock->name;
        class->subclass = subclass;
        INIT_LIST_HEAD(&class->lock_entry);
        INIT_LIST_HEAD(&class->locks_before);
        INIT_LIST_HEAD(&class->locks_after);
        class->name_version = count_matching_names(class);
        /*
         * We use RCU's safe list-add method to make
         * parallel walking of the hash-list safe:
         */
        list_add_tail_rcu(&class->hash_entry, hash_head);

        if (verbose(class)) {
                __raw_spin_unlock(&hash_lock);
                printk("\nnew class %p: %s", class->key, class->name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                printk("\n");
                dump_stack();
                __raw_spin_lock(&hash_lock);
        }
out_unlock_set:
        __raw_spin_unlock(&hash_lock);

out_set:
        lock->class[subclass] = class;

        DEBUG_LOCKS_WARN_ON(class->subclass != subclass);

        return class;
}

/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain will be
 * validated. If the key is already hashed, return 0.
 */
static inline int lookup_chain_cache(u64 chain_key)
{
        struct list_head *hash_head = chainhashentry(chain_key);
        struct lock_chain *chain;

        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
        /*
         * We can walk it lock-free, because entries only get added
         * to the hash:
         */
        list_for_each_entry(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
cache_hit:
                        debug_atomic_inc(&chain_lookup_hits);
                        /*
                         * In the debugging case, force redundant checking
                         * by returning 1:
                         */
#ifdef CONFIG_DEBUG_LOCKDEP
                        __raw_spin_lock(&hash_lock);
                        return 1;
#endif
                        return 0;
                }
        }
        /*
         * Allocate a new chain entry from the static array, and add
         * it to the hash:
         */
        __raw_spin_lock(&hash_lock);
        /*
         * We have to walk the chain again, locked - to avoid duplicates:
         */
        list_for_each_entry(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
                        __raw_spin_unlock(&hash_lock);
                        goto cache_hit;
                }
        }
        if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
                __raw_spin_unlock(&hash_lock);
                debug_locks_off();
                printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
                printk("turning off the locking correctness validator.\n");
                return 0;
        }
        chain = lock_chains + nr_lock_chains++;
        chain->chain_key = chain_key;
        list_add_tail_rcu(&chain->entry, hash_head);
        debug_atomic_inc(&chain_lookup_misses);
#ifdef CONFIG_TRACE_IRQFLAGS
        if (current->hardirq_context)
                nr_hardirq_chains++;
        else {
                if (current->softirq_context)
                        nr_softirq_chains++;
                else
                        nr_process_chains++;
        }
#else
        nr_process_chains++;
#endif

        return 1;
}

/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
        struct held_lock *hlock, *prev_hlock = NULL;
        unsigned int i, id;
        u64 chain_key = 0;

        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
                if (chain_key != hlock->prev_chain_key) {
                        debug_locks_off();
                        printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
                                curr->lockdep_depth, i,
                                (unsigned long long)chain_key,
                                (unsigned long long)hlock->prev_chain_key);
                        WARN_ON(1);
                        return;
                }
                id = hlock->class - lock_classes;
                DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
                if (prev_hlock && (prev_hlock->irq_context !=
                                                        hlock->irq_context))
                        chain_key = 0;
                chain_key = iterate_chain_key(chain_key, id);
                prev_hlock = hlock;
        }
        if (chain_key != curr->curr_chain_key) {
                debug_locks_off();
                printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
                        curr->lockdep_depth, i,
                        (unsigned long long)chain_key,
                        (unsigned long long)curr->curr_chain_key);
                WARN_ON(1);
        }
#endif
}

#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * print irq inversion bug:
 */
static int
print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
                        struct held_lock *this, int forwards,
                        const char *irqclass)
{
        __raw_spin_unlock(&hash_lock);
        debug_locks_off();
        if (debug_locks_silent)
                return 0;

        printk("\n=========================================================\n");
        printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
        printk(  "---------------------------------------------------------\n");
        printk("%s/%d just changed the state of lock:\n",
                curr->comm, curr->pid);
        print_lock(this);
        if (forwards)
                printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
        else
                printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
        print_lock_name(other);
        printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");

        printk("\nother info that might help us debug this:\n");
        lockdep_print_held_locks(curr);

        printk("\nthe first lock's dependencies:\n");
        print_lock_dependencies(this->class, 0);

        printk("\nthe second lock's dependencies:\n");
        print_lock_dependencies(other, 0);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

/*
 * Prove that in the forwards-direction subgraph starting at <this>
 * there is no lock matching <bit>:
 */
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
                     enum lock_usage_bit bit, const char *irqclass)
{
        int ret;

        find_usage_bit = bit;
        /* fills in <forwards_match> */
        ret = find_usage_forwards(this->class, 0);
        if (!ret || ret == 1)
                return ret;

        return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
}

/*
 * Prove that in the backwards-direction subgraph starting at <this>
 * there is no lock matching <bit>:
 */
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit bit, const char *irqclass)
{
        int ret;

        find_usage_bit = bit;
        /* fills in <backwards_match> */
        ret = find_usage_backwards(this->class, 0);
        if (!ret || ret == 1)
                return ret;

        return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
}

static inline void print_irqtrace_events(struct task_struct *curr)
{
        printk("irq event stamp: %u\n", curr->irq_events);
        printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
        print_ip_sym(curr->hardirq_enable_ip);
        printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
        print_ip_sym(curr->hardirq_disable_ip);
        printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
        print_ip_sym(curr->softirq_enable_ip);
        printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
        print_ip_sym(curr->softirq_disable_ip);
}

#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
                enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
        __raw_spin_unlock(&hash_lock);
        debug_locks_off();
        if (debug_locks_silent)
                return 0;

        printk("\n=================================\n");
        printk(  "[ INFO: inconsistent lock state ]\n");
        printk(  "---------------------------------\n");

        printk("inconsistent {%s} -> {%s} usage.\n",
                usage_str[prev_bit], usage_str[new_bit]);

        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
                curr->comm, curr->pid,
                trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
                trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
                trace_hardirqs_enabled(curr),
                trace_softirqs_enabled(curr));
        print_lock(this);

        printk("{%s} state was registered at:\n", usage_str[prev_bit]);
        print_stack_trace(this->class->usage_traces + prev_bit, 1);

        print_irqtrace_events(curr);
        printk("\nother info that might help us debug this:\n");
        lockdep_print_held_locks(curr);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

/*
 * Print out an error if an invalid bit is set:
 */
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
            enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
        if (unlikely(this->class->usage_mask & (1 << bad_bit)))
                return print_usage_bug(curr, this, bad_bit, new_bit);
        return 1;
}

#define STRICT_READ_CHECKS      1

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
                     enum lock_usage_bit new_bit, unsigned long ip)
{
        unsigned int new_mask = 1 << new_bit, ret = 1;

        /*
         * If already set then do not dirty the cacheline,
         * nor do any checks:
         */
        if (likely(this->class->usage_mask & new_mask))
                return 1;

        __raw_spin_lock(&hash_lock);
        /*
         * Make sure we didn't race:
         */
        if (unlikely(this->class->usage_mask & new_mask)) {
                __raw_spin_unlock(&hash_lock);
                return 1;
        }

        this->class->usage_mask |= new_mask;

#ifdef CONFIG_TRACE_IRQFLAGS
        if (new_bit == LOCK_ENABLED_HARDIRQS ||
                        new_bit == LOCK_ENABLED_HARDIRQS_READ)
                ip = curr->hardirq_enable_ip;
        else if (new_bit == LOCK_ENABLED_SOFTIRQS ||
                        new_bit == LOCK_ENABLED_SOFTIRQS_READ)
                ip = curr->softirq_enable_ip;
#endif
        if (!save_trace(this->class->usage_traces + new_bit))
                return 0;

        switch (new_bit) {
#ifdef CONFIG_TRACE_IRQFLAGS
        case LOCK_USED_IN_HARDIRQ:
                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
                        return 0;
                if (!valid_state(curr, this, new_bit,
                                 LOCK_ENABLED_HARDIRQS_READ))
                        return 0;
                /*
                 * just marked it hardirq-safe, check that this lock
                 * took no hardirq-unsafe lock in the past:
                 */
                if (!check_usage_forwards(curr, this,
                                          LOCK_ENABLED_HARDIRQS, "hard"))
                        return 0;
#if STRICT_READ_CHECKS
                /*
                 * just marked it hardirq-safe, check that this lock
                 * took no hardirq-unsafe-read lock in the past:
                 */
                if (!check_usage_forwards(curr, this,
                                LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
                        return 0;
#endif
                if (hardirq_verbose(this->class))
                        ret = 2;
                break;
        case LOCK_USED_IN_SOFTIRQ:
                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
                        return 0;
                if (!valid_state(curr, this, new_bit,
                                 LOCK_ENABLED_SOFTIRQS_READ))
                        return 0;
                /*
                 * just marked it softirq-safe, check that this lock
                 * took no softirq-unsafe lock in the past:
                 */
                if (!check_usage_forwards(curr, this,
                                          LOCK_ENABLED_SOFTIRQS, "soft"))
                        return 0;
#if STRICT_READ_CHECKS
                /*
                 * just marked it softirq-safe, check that this lock
                 * took no softirq-unsafe-read lock in the past:
                 */
                if (!check_usage_forwards(curr, this,
                                LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
                        return 0;
#endif
                if (softirq_verbose(this->class))
                        ret = 2;
                break;
        case LOCK_USED_IN_HARDIRQ_READ:
                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
                        return 0;
                /*
                 * just marked it hardirq-read-safe, check that this lock
                 * took no hardirq-unsafe lock in the past:
                 */
                if (!check_usage_forwards(curr, this,
                                          LOCK_ENABLED_HARDIRQS, "hard"))
                        return 0;
                if (hardirq_verbose(this->class))
                        ret = 2;
                break;
        case LOCK_USED_IN_SOFTIRQ_READ:
                if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
                        return 0;
                /*
                 * just marked it softirq-read-safe, check that this lock
                 * took no softirq-unsafe lock in the past:
                 */
                if (!check_usage_forwards(curr, this,
                                          LOCK_ENABLED_SOFTIRQS, "soft"))
                        return 0;
                if (softirq_verbose(this->class))
                        ret = 2;
                break;
        case LOCK_ENABLED_HARDIRQS:
                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
                        return 0;
                if (!valid_state(curr, this, new_bit,
                                 LOCK_USED_IN_HARDIRQ_READ))
                        return 0;
                /*
                 * just marked it hardirq-unsafe, check that no hardirq-safe
                 * lock in the system ever took it in the past:
                 */
                if (!check_usage_backwards(curr, this,
                                           LOCK_USED_IN_HARDIRQ, "hard"))
                        return 0;
#if STRICT_READ_CHECKS
                /*
                 * just marked it hardirq-unsafe, check that no
                 * hardirq-safe-read lock in the system ever took
                 * it in the past:
                 */
                if (!check_usage_backwards(curr, this,
                                   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
                        return 0;
#endif
                if (hardirq_verbose(this->class))
                        ret = 2;
                break;
        case LOCK_ENABLED_SOFTIRQS:
                if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
                        return 0;
                if (!valid_state(curr, this, new_bit,
1628                                  LOCK_USED_IN_SOFTIRQ_READ))
1629                         return 0;
1630                 /*
1631                  * just marked it softirq-unsafe, check that no softirq-safe
1632                  * lock in the system ever took it in the past:
1633                  */
1634                 if (!check_usage_backwards(curr, this,
1635                                            LOCK_USED_IN_SOFTIRQ, "soft"))
1636                         return 0;
1637 #if STRICT_READ_CHECKS
1638                 /*
1639                  * just marked it softirq-unsafe, check that no
1640                  * softirq-safe-read lock in the system ever took
1641                  * it in the past:
1642                  */
1643                 if (!check_usage_backwards(curr, this,
1644                                    LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
1645                         return 0;
1646 #endif
1647                 if (softirq_verbose(this->class))
1648                         ret = 2;
1649                 break;
1650         case LOCK_ENABLED_HARDIRQS_READ:
1651                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1652                         return 0;
1653 #if STRICT_READ_CHECKS
1654                 /*
1655                  * just marked it hardirq-read-unsafe, check that no
1656                  * hardirq-safe lock in the system ever took it in the past:
1657                  */
1658                 if (!check_usage_backwards(curr, this,
1659                                            LOCK_USED_IN_HARDIRQ, "hard"))
1660                         return 0;
1661 #endif
1662                 if (hardirq_verbose(this->class))
1663                         ret = 2;
1664                 break;
1665         case LOCK_ENABLED_SOFTIRQS_READ:
1666                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1667                         return 0;
1668 #if STRICT_READ_CHECKS
1669                 /*
1670                  * just marked it softirq-read-unsafe, check that no
1671                  * softirq-safe lock in the system ever took it in the past:
1672                  */
1673                 if (!check_usage_backwards(curr, this,
1674                                            LOCK_USED_IN_SOFTIRQ, "soft"))
1675                         return 0;
1676 #endif
1677                 if (softirq_verbose(this->class))
1678                         ret = 2;
1679                 break;
1680 #endif
1681         case LOCK_USED:
1682                 /*
1683                  * Add it to the global list of classes:
1684                  */
1685                 list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
1686                 debug_atomic_dec(&nr_unused_locks);
1687                 break;
1688         default:
1689                 debug_locks_off();
1690                 WARN_ON(1);
1691                 return 0;
1692         }
1693
1694         __raw_spin_unlock(&hash_lock);
1695
1696         /*
1697          * We must printk outside of the hash_lock:
1698          */
1699         if (ret == 2) {
1700                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
1701                 print_lock(this);
1702                 print_irqtrace_events(curr);
1703                 dump_stack();
1704         }
1705
1706         return ret;
1707 }
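
/*
 * Example of a transition mark_lock() rejects (illustrative): if a
 * class already carries LOCK_ENABLED_HARDIRQS (it was acquired with
 * hardirqs enabled), then taking it inside a hardirq handler sets
 * LOCK_USED_IN_HARDIRQ and valid_state() reports the conflict:
 *
 *      spin_lock(&lock);       // process context, hardirqs enabled
 *      ...
 *      // later, from a hardirq handler:
 *      spin_lock(&lock);       // hardirq-safe + hardirq-unsafe -> bug
 */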
1708
1709 #ifdef CONFIG_TRACE_IRQFLAGS
1710 /*
1711  * Mark all held locks with a usage bit:
1712  */
1713 static int
1714 mark_held_locks(struct task_struct *curr, int hardirq, unsigned long ip)
1715 {
1716         enum lock_usage_bit usage_bit;
1717         struct held_lock *hlock;
1718         int i;
1719
1720         for (i = 0; i < curr->lockdep_depth; i++) {
1721                 hlock = curr->held_locks + i;
1722
1723                 if (hardirq) {
1724                         if (hlock->read)
1725                                 usage_bit = LOCK_ENABLED_HARDIRQS_READ;
1726                         else
1727                                 usage_bit = LOCK_ENABLED_HARDIRQS;
1728                 } else {
1729                         if (hlock->read)
1730                                 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
1731                         else
1732                                 usage_bit = LOCK_ENABLED_SOFTIRQS;
1733                 }
1734                 if (!mark_lock(curr, hlock, usage_bit, ip))
1735                         return 0;
1736         }
1737
1738         return 1;
1739 }
1740
1741 /*
1742  * Debugging helper: via this flag we know that we are in
1743  * 'early bootup code', and will warn about any invalid irqs-on event:
1744  */
1745 static int early_boot_irqs_enabled;
1746
1747 void early_boot_irqs_off(void)
1748 {
1749         early_boot_irqs_enabled = 0;
1750 }
1751
1752 void early_boot_irqs_on(void)
1753 {
1754         early_boot_irqs_enabled = 1;
1755 }
1756
1757 /*
1758  * Hardirqs will be enabled:
1759  */
1760 void trace_hardirqs_on(void)
1761 {
1762         struct task_struct *curr = current;
1763         unsigned long ip;
1764
1765         if (unlikely(!debug_locks || current->lockdep_recursion))
1766                 return;
1767
1768         if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
1769                 return;
1770
1771         if (unlikely(curr->hardirqs_enabled)) {
1772                 debug_atomic_inc(&redundant_hardirqs_on);
1773                 return;
1774         }
1775         /* we'll do an OFF -> ON transition: */
1776         curr->hardirqs_enabled = 1;
1777         ip = (unsigned long) __builtin_return_address(0);
1778
1779         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1780                 return;
1781         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
1782                 return;
1783         /*
1784          * We are going to turn hardirqs on, so set the
1785          * usage bit for all held locks:
1786          */
1787         if (!mark_held_locks(curr, 1, ip))
1788                 return;
1789         /*
1790          * If we have softirqs enabled, then set the usage
1791          * bit for all held locks. (disabled hardirqs prevented
1792          * this bit from being set before)
1793          */
1794         if (curr->softirqs_enabled)
1795                 if (!mark_held_locks(curr, 0, ip))
1796                         return;
1797
1798         curr->hardirq_enable_ip = ip;
1799         curr->hardirq_enable_event = ++curr->irq_events;
1800         debug_atomic_inc(&hardirqs_on_events);
1801 }
1802
1803 EXPORT_SYMBOL(trace_hardirqs_on);
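
/*
 * Sketch of how this hook gets called (assuming the
 * CONFIG_TRACE_IRQFLAGS wrappers in <linux/irqflags.h>; the exact
 * expansion here is illustrative):
 *
 *      #define local_irq_enable() \
 *              do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *
 * i.e. the validator learns about the OFF -> ON transition just
 * before hardirqs are actually enabled.
 */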
1804
1805 /*
1806  * Hardirqs were disabled:
1807  */
1808 void trace_hardirqs_off(void)
1809 {
1810         struct task_struct *curr = current;
1811
1812         if (unlikely(!debug_locks || current->lockdep_recursion))
1813                 return;
1814
1815         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1816                 return;
1817
1818         if (curr->hardirqs_enabled) {
1819                 /*
1820                  * We have done an ON -> OFF transition:
1821                  */
1822                 curr->hardirqs_enabled = 0;
1823                 curr->hardirq_disable_ip = _RET_IP_;
1824                 curr->hardirq_disable_event = ++curr->irq_events;
1825                 debug_atomic_inc(&hardirqs_off_events);
1826         } else
1827                 debug_atomic_inc(&redundant_hardirqs_off);
1828 }
1829
1830 EXPORT_SYMBOL(trace_hardirqs_off);
1831
1832 /*
1833  * Softirqs will be enabled:
1834  */
1835 void trace_softirqs_on(unsigned long ip)
1836 {
1837         struct task_struct *curr = current;
1838
1839         if (unlikely(!debug_locks))
1840                 return;
1841
1842         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1843                 return;
1844
1845         if (curr->softirqs_enabled) {
1846                 debug_atomic_inc(&redundant_softirqs_on);
1847                 return;
1848         }
1849
1850         /*
1851          * We'll do an OFF -> ON transition:
1852          */
1853         curr->softirqs_enabled = 1;
1854         curr->softirq_enable_ip = ip;
1855         curr->softirq_enable_event = ++curr->irq_events;
1856         debug_atomic_inc(&softirqs_on_events);
1857         /*
1858          * We are going to turn softirqs on, so set the
1859          * usage bit for all held locks, if hardirqs are
1860          * enabled too:
1861          */
1862         if (curr->hardirqs_enabled)
1863                 mark_held_locks(curr, 0, ip);
1864 }
1865
1866 /*
1867  * Softirqs were disabled:
1868  */
1869 void trace_softirqs_off(unsigned long ip)
1870 {
1871         struct task_struct *curr = current;
1872
1873         if (unlikely(!debug_locks))
1874                 return;
1875
1876         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1877                 return;
1878
1879         if (curr->softirqs_enabled) {
1880                 /*
1881                  * We have done an ON -> OFF transition:
1882                  */
1883                 curr->softirqs_enabled = 0;
1884                 curr->softirq_disable_ip = ip;
1885                 curr->softirq_disable_event = ++curr->irq_events;
1886                 debug_atomic_inc(&softirqs_off_events);
1887                 DEBUG_LOCKS_WARN_ON(!softirq_count());
1888         } else
1889                 debug_atomic_inc(&redundant_softirqs_off);
1890 }
1891
1892 #endif
1893
1894 /*
1895  * Initialize a lock instance's lock-class mapping info:
1896  */
1897 void lockdep_init_map(struct lockdep_map *lock, const char *name,
1898                       struct lock_class_key *key)
1899 {
1900         if (unlikely(!debug_locks))
1901                 return;
1902
1903         if (DEBUG_LOCKS_WARN_ON(!key))
1904                 return;
1905         if (DEBUG_LOCKS_WARN_ON(!name))
1906                 return;
1907         /*
1908          * Sanity check, the lock-class key must be persistent:
1909          */
1910         if (!static_obj(key)) {
1911                 printk("BUG: key %p not in .data!\n", key);
1912                 DEBUG_LOCKS_WARN_ON(1);
1913                 return;
1914         }
1915         lock->name = name;
1916         lock->key = key;
1917         memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES);
1918 }
1919
1920 EXPORT_SYMBOL_GPL(lockdep_init_map);
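
/*
 * Illustrative use (the names here are made up): a lock embedded in a
 * dynamically allocated object pairs its map with a key in static
 * storage - which is what the static_obj() check above enforces:
 *
 *      static struct lock_class_key my_key;
 *
 *      struct my_object *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *      lockdep_init_map(&obj->dep_map, "my_object_lock", &my_key);
 *
 * All maps initialized against the same key end up in the same
 * lock class.
 */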
1921
1922 /*
1923  * This gets called for every mutex_lock*()/spin_lock*() operation.
1924  * We maintain the dependency maps and validate the locking attempt:
1925  */
1926 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
1927                           int trylock, int read, int check, int hardirqs_off,
1928                           unsigned long ip)
1929 {
1930         struct task_struct *curr = current;
1931         struct held_lock *hlock;
1932         struct lock_class *class;
1933         unsigned int depth, id;
1934         int chain_head = 0;
1935         u64 chain_key;
1936
1937         if (unlikely(!debug_locks))
1938                 return 0;
1939
1940         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1941                 return 0;
1942
1943         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
1944                 debug_locks_off();
1945                 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
1946                 printk("turning off the locking correctness validator.\n");
1947                 return 0;
1948         }
1949
1950         class = lock->class[subclass];
1951         /* not cached yet? */
1952         if (unlikely(!class)) {
1953                 class = register_lock_class(lock, subclass);
1954                 if (!class)
1955                         return 0;
1956         }
1957         debug_atomic_inc((atomic_t *)&class->ops);
1958         if (very_verbose(class)) {
1959                 printk("\nacquire class [%p] %s", class->key, class->name);
1960                 if (class->name_version > 1)
1961                         printk("#%d", class->name_version);
1962                 printk("\n");
1963                 dump_stack();
1964         }
1965
1966         /*
1967          * Add the lock to the list of currently held locks.
1968          * (we don't increase the depth just yet, up until the
1969          * dependency checks are done)
1970          */
1971         depth = curr->lockdep_depth;
1972         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
1973                 return 0;
1974
1975         hlock = curr->held_locks + depth;
1976
1977         hlock->class = class;
1978         hlock->acquire_ip = ip;
1979         hlock->instance = lock;
1980         hlock->trylock = trylock;
1981         hlock->read = read;
1982         hlock->check = check;
1983         hlock->hardirqs_off = hardirqs_off;
1984
1985         if (check != 2)
1986                 goto out_calc_hash;
1987 #ifdef CONFIG_TRACE_IRQFLAGS
1988         /*
1989          * If non-trylock use in a hardirq or softirq context, then
1990          * mark the lock as used in these contexts:
1991          */
1992         if (!trylock) {
1993                 if (read) {
1994                         if (curr->hardirq_context)
1995                                 if (!mark_lock(curr, hlock,
1996                                                 LOCK_USED_IN_HARDIRQ_READ, ip))
1997                                         return 0;
1998                         if (curr->softirq_context)
1999                                 if (!mark_lock(curr, hlock,
2000                                                 LOCK_USED_IN_SOFTIRQ_READ, ip))
2001                                         return 0;
2002                 } else {
2003                         if (curr->hardirq_context)
2004                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ, ip))
2005                                         return 0;
2006                         if (curr->softirq_context)
2007                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ, ip))
2008                                         return 0;
2009                 }
2010         }
2011         if (!hardirqs_off) {
2012                 if (read) {
2013                         if (!mark_lock(curr, hlock,
2014                                         LOCK_ENABLED_HARDIRQS_READ, ip))
2015                                 return 0;
2016                         if (curr->softirqs_enabled)
2017                                 if (!mark_lock(curr, hlock,
2018                                                 LOCK_ENABLED_SOFTIRQS_READ, ip))
2019                                         return 0;
2020                 } else {
2021                         if (!mark_lock(curr, hlock,
2022                                         LOCK_ENABLED_HARDIRQS, ip))
2023                                 return 0;
2024                         if (curr->softirqs_enabled)
2025                                 if (!mark_lock(curr, hlock,
2026                                                 LOCK_ENABLED_SOFTIRQS, ip))
2027                                         return 0;
2028                 }
2029         }
2030 #endif
2031         /* mark it as used: */
2032         if (!mark_lock(curr, hlock, LOCK_USED, ip))
2033                 return 0;
2034 out_calc_hash:
2035         /*
2036          * Calculate the chain hash: it's the combined hash of all the
2037          * lock keys along the dependency chain. We save the hash value
2038          * at every step so that we can get the current hash easily
2039          * after unlock. The chain hash is then used to cache dependency
2040          * results.
2041          *
2042          * The 'key ID' (the class index) is the most compact value
2043          * to drive the hash, so we use it rather than class->key.
2044          */
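        /*
         * Illustrative accumulation (the ids are hypothetical): with
         * classes A, B, C acquired in order, the chain key evolves as
         *
         *      k1 = iterate_chain_key(0,  id_A);
         *      k2 = iterate_chain_key(k1, id_B);
         *      k3 = iterate_chain_key(k2, id_C);
         *
         * and each hlock->prev_chain_key stores the value from before
         * that lock was folded in, so unlock can restore it cheaply.
         */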
2045         id = class - lock_classes;
2046         if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2047                 return 0;
2048
2049         chain_key = curr->curr_chain_key;
2050         if (!depth) {
2051                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2052                         return 0;
2053                 chain_head = 1;
2054         }
2055
2056         hlock->prev_chain_key = chain_key;
2057
2058 #ifdef CONFIG_TRACE_IRQFLAGS
2059         /*
2060          * Keep track of points where we cross into an interrupt context:
2061          */
2062         hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2063                                 curr->softirq_context;
2064         if (depth) {
2065                 struct held_lock *prev_hlock;
2066
2067                 prev_hlock = curr->held_locks + depth-1;
2068                 /*
2069                  * If we cross into another context, reset the
2070                  * hash key (this also prevents the checking and the
2071                  * adding of the dependency to 'prev'):
2072                  */
2073                 if (prev_hlock->irq_context != hlock->irq_context) {
2074                         chain_key = 0;
2075                         chain_head = 1;
2076                 }
2077         }
2078 #endif
2079         chain_key = iterate_chain_key(chain_key, id);
2080         curr->curr_chain_key = chain_key;
2081
2082         /*
2083          * Trylock needs to maintain the stack of held locks, but it
2084          * does not add new dependencies, because trylock can be done
2085          * in any order.
2086          *
2087          * We look up the chain_key and do the O(N^2) check and update of
2088          * the dependencies only if this is a new dependency chain.
2089          * (If lookup_chain_cache() returns with 1 it acquires
2090          * hash_lock for us)
2091          */
2092         if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) {
2093                 /*
2094                  * Check whether the last held lock:
2095                  *
2096                  * - is irq-safe, if this lock is irq-unsafe
2097                  * - is softirq-safe, if this lock is hardirq-unsafe
2098                  *
2099                  * And check whether the new lock's dependency graph
2100                  * could lead back to the previous lock.
2101                  *
2102                  * Any of these scenarios could lead to a deadlock. If
2103                  * all validations pass, we add the new dependency below.
2104                  */
2105                 int ret = check_deadlock(curr, hlock, lock, read);
2106
2107                 if (!ret)
2108                         return 0;
2109                 /*
2110                  * Mark recursive read, as we jump over it when
2111                  * building dependencies (just like we jump over
2112                  * trylock entries):
2113                  */
2114                 if (ret == 2)
2115                         hlock->read = 2;
2116                 /*
2117                  * Add dependency only if this lock is not the head
2118                  * of the chain, and if it's not a secondary read-lock:
2119                  */
2120                 if (!chain_head && ret != 2)
2121                         if (!check_prevs_add(curr, hlock))
2122                                 return 0;
2123                 __raw_spin_unlock(&hash_lock);
2124         }
2125         curr->lockdep_depth++;
2126         check_chain_key(curr);
2127         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2128                 debug_locks_off();
2129                 printk("BUG: MAX_LOCK_DEPTH too low!\n");
2130                 printk("turning off the locking correctness validator.\n");
2131                 return 0;
2132         }
2133         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2134                 max_lockdep_depth = curr->lockdep_depth;
2135
2136         return 1;
2137 }
2138
2139 static int
2140 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2141                            unsigned long ip)
2142 {
2143         if (!debug_locks_off())
2144                 return 0;
2145         if (debug_locks_silent)
2146                 return 0;
2147
2148         printk("\n=====================================\n");
2149         printk(  "[ BUG: bad unlock balance detected! ]\n");
2150         printk(  "-------------------------------------\n");
2151         printk("%s/%d is trying to release lock (",
2152                 curr->comm, curr->pid);
2153         print_lockdep_cache(lock);
2154         printk(") at:\n");
2155         print_ip_sym(ip);
2156         printk("but there are no more locks to release!\n");
2157         printk("\nother info that might help us debug this:\n");
2158         lockdep_print_held_locks(curr);
2159
2160         printk("\nstack backtrace:\n");
2161         dump_stack();
2162
2163         return 0;
2164 }
2165
2166 /*
2167  * Common debugging checks for both nested and non-nested unlock:
2168  */
2169 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2170                         unsigned long ip)
2171 {
2172         if (unlikely(!debug_locks))
2173                 return 0;
2174         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2175                 return 0;
2176
2177         if (curr->lockdep_depth <= 0)
2178                 return print_unlock_inbalance_bug(curr, lock, ip);
2179
2180         return 1;
2181 }
2182
2183 /*
2184  * Remove the lock from the list of currently held locks in a
2185  * potentially non-nested (out of order) manner. This is a
2186  * relatively rare operation, as all the unlock APIs default
2187  * to nested mode (which uses lock_release()):
2188  */
2189 static int
2190 lock_release_non_nested(struct task_struct *curr,
2191                         struct lockdep_map *lock, unsigned long ip)
2192 {
2193         struct held_lock *hlock, *prev_hlock;
2194         unsigned int depth;
2195         int i;
2196
2197         /*
2198          * Check whether the lock exists in the current stack
2199          * of held locks:
2200          */
2201         depth = curr->lockdep_depth;
2202         if (DEBUG_LOCKS_WARN_ON(!depth))
2203                 return 0;
2204
2205         prev_hlock = NULL;
2206         for (i = depth-1; i >= 0; i--) {
2207                 hlock = curr->held_locks + i;
2208                 /*
2209                  * We must not cross into another context:
2210                  */
2211                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2212                         break;
2213                 if (hlock->instance == lock)
2214                         goto found_it;
2215                 prev_hlock = hlock;
2216         }
2217         return print_unlock_inbalance_bug(curr, lock, ip);
2218
2219 found_it:
2220         /*
2221          * We have the right lock to unlock, 'hlock' points to it.
2222          * Now we remove it from the stack, and add back the other
2223          * entries (if any), recalculating the hash along the way:
2224          */
2225         curr->lockdep_depth = i;
2226         curr->curr_chain_key = hlock->prev_chain_key;
2227
2228         for (i++; i < depth; i++) {
2229                 hlock = curr->held_locks + i;
2230                 if (!__lock_acquire(hlock->instance,
2231                         hlock->class->subclass, hlock->trylock,
2232                                 hlock->read, hlock->check, hlock->hardirqs_off,
2233                                 hlock->acquire_ip))
2234                         return 0;
2235         }
2236
2237         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2238                 return 0;
2239         return 1;
2240 }
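
/*
 * Illustrative out-of-order (but legal) release sequence that
 * lock_release_non_nested() handles:
 *
 *      spin_lock(&a);
 *      spin_lock(&b);
 *      spin_unlock(&a);        // released from the middle of the stack
 *      spin_unlock(&b);
 *
 * The entries above the released lock are pushed through
 * __lock_acquire() again to rebuild the stack and the chain key.
 */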
2241
2242 /*
2243  * Remove the lock from the list of currently held locks - this gets
2244  * called on mutex_unlock()/spin_unlock*() (or on a failed
2245  * mutex_lock_interruptible()). This is done for unlocks that nest
2246  * perfectly. (i.e. the current top of the lock-stack is unlocked)
2247  */
2248 static int lock_release_nested(struct task_struct *curr,
2249                                struct lockdep_map *lock, unsigned long ip)
2250 {
2251         struct held_lock *hlock;
2252         unsigned int depth;
2253
2254         /*
2255          * Pop off the top of the lock stack:
2256          */
2257         depth = curr->lockdep_depth - 1;
2258         hlock = curr->held_locks + depth;
2259
2260         /*
2261          * Is the unlock non-nested:
2262          */
2263         if (hlock->instance != lock)
2264                 return lock_release_non_nested(curr, lock, ip);
2265         curr->lockdep_depth--;
2266
2267         if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2268                 return 0;
2269
2270         curr->curr_chain_key = hlock->prev_chain_key;
2271
2272 #ifdef CONFIG_DEBUG_LOCKDEP
2273         hlock->prev_chain_key = 0;
2274         hlock->class = NULL;
2275         hlock->acquire_ip = 0;
2276         hlock->irq_context = 0;
2277 #endif
2278         return 1;
2279 }
2280
2281 /*
2282  * Remove the lock from the list of currently held locks - this gets
2283  * called on mutex_unlock()/spin_unlock*() (or on a failed
2284  * mutex_lock_interruptible()). Both perfectly nested and non-nested
2285  * (out of order) unlocks are handled here.
2286  */
2287 static void
2288 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2289 {
2290         struct task_struct *curr = current;
2291
2292         if (!check_unlock(curr, lock, ip))
2293                 return;
2294
2295         if (nested) {
2296                 if (!lock_release_nested(curr, lock, ip))
2297                         return;
2298         } else {
2299                 if (!lock_release_non_nested(curr, lock, ip))
2300                         return;
2301         }
2302
2303         check_chain_key(curr);
2304 }
2305
2306 /*
2307  * Check whether we follow the irq-flags state precisely:
2308  */
2309 static void check_flags(unsigned long flags)
2310 {
2311 #if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
2312         if (!debug_locks)
2313                 return;
2314
2315         if (irqs_disabled_flags(flags))
2316                 DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
2317         else
2318                 DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
2319
2320         /*
2321          * We don't accurately track softirq state in e.g.
2322          * hardirq contexts (such as on 4KSTACKS), so only
2323          * check if not in hardirq contexts:
2324          */
2325         if (!hardirq_count()) {
2326                 if (softirq_count())
2327                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2328                 else
2329                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2330         }
2331
2332         if (!debug_locks)
2333                 print_irqtrace_events(current);
2334 #endif
2335 }
2336
2337 /*
2338  * We are not always called with irqs disabled - do that here,
2339  * and also avoid lockdep recursion:
2340  */
2341 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2342                   int trylock, int read, int check, unsigned long ip)
2343 {
2344         unsigned long flags;
2345
2346         if (unlikely(current->lockdep_recursion))
2347                 return;
2348
2349         raw_local_irq_save(flags);
2350         check_flags(flags);
2351
2352         current->lockdep_recursion = 1;
2353         __lock_acquire(lock, subclass, trylock, read, check,
2354                        irqs_disabled_flags(flags), ip);
2355         current->lockdep_recursion = 0;
2356         raw_local_irq_restore(flags);
2357 }
2358
2359 EXPORT_SYMBOL_GPL(lock_acquire);
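
/*
 * Sketch of how the locking APIs feed this entry point (assuming the
 * wrapper macros in <linux/lockdep.h>; the parameter values are
 * illustrative):
 *
 *      spin_acquire(l, s, t, i)        -> lock_acquire(l, s, t, 0, 2, i)
 *      rwlock_acquire_read(l, s, t, i) -> lock_acquire(l, s, t, 2, 2, i)
 *
 * read=2 marks a recursive read lock and check=2 requests full
 * validation, including the irq-safety checks above.
 */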
2360
2361 void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2362 {
2363         unsigned long flags;
2364
2365         if (unlikely(current->lockdep_recursion))
2366                 return;
2367
2368         raw_local_irq_save(flags);
2369         check_flags(flags);
2370         current->lockdep_recursion = 1;
2371         __lock_release(lock, nested, ip);
2372         current->lockdep_recursion = 0;
2373         raw_local_irq_restore(flags);
2374 }
2375
2376 EXPORT_SYMBOL_GPL(lock_release);
2377
2378 /*
2379  * Used by the testsuite, sanitize the validator state
2380  * after a simulated failure:
2381  */
2382
2383 void lockdep_reset(void)
2384 {
2385         unsigned long flags;
2386
2387         raw_local_irq_save(flags);
2388         current->curr_chain_key = 0;
2389         current->lockdep_depth = 0;
2390         current->lockdep_recursion = 0;
2391         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
2392         nr_hardirq_chains = 0;
2393         nr_softirq_chains = 0;
2394         nr_process_chains = 0;
2395         debug_locks = 1;
2396         raw_local_irq_restore(flags);
2397 }
2398
2399 static void zap_class(struct lock_class *class)
2400 {
2401         int i;
2402
2403         /*
2404          * Remove all dependencies this lock is
2405          * involved in:
2406          */
2407         for (i = 0; i < nr_list_entries; i++) {
2408                 if (list_entries[i].class == class)
2409                         list_del_rcu(&list_entries[i].entry);
2410         }
2411         /*
2412          * Unhash the class and remove it from the all_lock_classes list:
2413          */
2414         list_del_rcu(&class->hash_entry);
2415         list_del_rcu(&class->lock_entry);
2416
2417 }
2418
2419 static inline int within(void *addr, void *start, unsigned long size)
2420 {
2421         return addr >= start && addr < start + size;
2422 }
2423
2424 void lockdep_free_key_range(void *start, unsigned long size)
2425 {
2426         struct lock_class *class, *next;
2427         struct list_head *head;
2428         unsigned long flags;
2429         int i;
2430
2431         raw_local_irq_save(flags);
2432         __raw_spin_lock(&hash_lock);
2433
2434         /*
2435          * Unhash all classes that were created by this module:
2436          */
2437         for (i = 0; i < CLASSHASH_SIZE; i++) {
2438                 head = classhash_table + i;
2439                 if (list_empty(head))
2440                         continue;
2441                 list_for_each_entry_safe(class, next, head, hash_entry)
2442                         if (within(class->key, start, size))
2443                                 zap_class(class);
2444         }
2445
2446         __raw_spin_unlock(&hash_lock);
2447         raw_local_irq_restore(flags);
2448 }
2449
2450 void lockdep_reset_lock(struct lockdep_map *lock)
2451 {
2452         struct lock_class *class, *next, *entry;
2453         struct list_head *head;
2454         unsigned long flags;
2455         int i, j;
2456
2457         raw_local_irq_save(flags);
2458         __raw_spin_lock(&hash_lock);
2459
2460         /*
2461          * Remove all classes this lock has:
2462          */
2463         for (i = 0; i < CLASSHASH_SIZE; i++) {
2464                 head = classhash_table + i;
2465                 if (list_empty(head))
2466                         continue;
2467                 list_for_each_entry_safe(class, next, head, hash_entry) {
2468                         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
2469                                 entry = lock->class[j];
2470                                 if (class == entry) {
2471                                         zap_class(class);
2472                                         lock->class[j] = NULL;
2473                                         break;
2474                                 }
2475                         }
2476                 }
2477         }
2478
2479         /*
2480          * Debug check: in the end all mapped classes should
2481          * be gone.
2482          */
2483         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
2484                 entry = lock->class[j];
2485                 if (!entry)
2486                         continue;
2487                 __raw_spin_unlock(&hash_lock);
2488                 DEBUG_LOCKS_WARN_ON(1);
2489                 raw_local_irq_restore(flags);
2490                 return;
2491         }
2492
2493         __raw_spin_unlock(&hash_lock);
2494         raw_local_irq_restore(flags);
2495 }
2496
2497 void __init lockdep_init(void)
2498 {
2499         int i;
2500
2501         /*
2502          * Some architectures have their own start_kernel()
2503          * code which calls lockdep_init(), while we also
2504          * call lockdep_init() from the start_kernel() itself,
2505          * and we want to initialize the hashes only once:
2506          */
2507         if (lockdep_initialized)
2508                 return;
2509
2510         for (i = 0; i < CLASSHASH_SIZE; i++)
2511                 INIT_LIST_HEAD(classhash_table + i);
2512
2513         for (i = 0; i < CHAINHASH_SIZE; i++)
2514                 INIT_LIST_HEAD(chainhash_table + i);
2515
2516         lockdep_initialized = 1;
2517 }
2518
2519 void __init lockdep_info(void)
2520 {
2521         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
2522
2523         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
2524         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
2525         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
2526         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
2527         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
2528         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
2529         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
2530
2531         printk(" memory used by lock dependency info: %lu kB\n",
2532                 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
2533                 sizeof(struct list_head) * CLASSHASH_SIZE +
2534                 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
2535                 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
2536                 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
2537
2538         printk(" per task-struct memory footprint: %lu bytes\n",
2539                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
2540
2541 #ifdef CONFIG_DEBUG_LOCKDEP
2542         if (lockdep_init_error)
2543                 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
2544 #endif
2545 }
2546
2547 static inline int in_range(const void *start, const void *addr, const void *end)
2548 {
2549         return addr >= start && addr <= end;
2550 }
2551
2552 static void
2553 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
2554                      const void *mem_to)
2555 {
2556         if (!debug_locks_off())
2557                 return;
2558         if (debug_locks_silent)
2559                 return;
2560
2561         printk("\n=========================\n");
2562         printk(  "[ BUG: held lock freed! ]\n");
2563         printk(  "-------------------------\n");
2564         printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
2565                 curr->comm, curr->pid, mem_from, mem_to-1);
2566         lockdep_print_held_locks(curr);
2567
2568         printk("\nstack backtrace:\n");
2569         dump_stack();
2570 }
2571
2572 /*
2573  * Called when kernel memory is freed (or unmapped), or if a lock
2574  * is destroyed or reinitialized - this code checks whether there is
2575  * any held lock in the memory range of <from> to <to>:
2576  */
2577 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
2578 {
2579         const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
2580         struct task_struct *curr = current;
2581         struct held_lock *hlock;
2582         unsigned long flags;
2583         int i;
2584
2585         if (unlikely(!debug_locks))
2586                 return;
2587
2588         local_irq_save(flags);
2589         for (i = 0; i < curr->lockdep_depth; i++) {
2590                 hlock = curr->held_locks + i;
2591
2592                 lock_from = (void *)hlock->instance;
2593                 lock_to = (void *)(hlock->instance + 1);
2594
2595                 if (!in_range(mem_from, lock_from, mem_to) &&
2596                                         !in_range(mem_from, lock_to, mem_to))
2597                         continue;
2598
2599                 print_freed_lock_bug(curr, mem_from, mem_to);
2600                 break;
2601         }
2602         local_irq_restore(flags);
2603 }
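
/*
 * Illustrative caller (hypothetical): an allocator would run the
 * check just before handing memory back, e.g.:
 *
 *      void my_free(void *obj, unsigned long size)
 *      {
 *              debug_check_no_locks_freed(obj, size);
 *              ... actually free the memory ...
 *      }
 */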
2604
2605 static void print_held_locks_bug(struct task_struct *curr)
2606 {
2607         if (!debug_locks_off())
2608                 return;
2609         if (debug_locks_silent)
2610                 return;
2611
2612         printk("\n=====================================\n");
2613         printk(  "[ BUG: lock held at task exit time! ]\n");
2614         printk(  "-------------------------------------\n");
2615         printk("%s/%d is exiting with locks still held!\n",
2616                 curr->comm, curr->pid);
2617         lockdep_print_held_locks(curr);
2618
2619         printk("\nstack backtrace:\n");
2620         dump_stack();
2621 }
2622
2623 void debug_check_no_locks_held(struct task_struct *task)
2624 {
2625         if (unlikely(task->lockdep_depth > 0))
2626                 print_held_locks_bug(task);
2627 }
2628
2629 void debug_show_all_locks(void)
2630 {
2631         struct task_struct *g, *p;
2632         int count = 10;
2633         int unlock = 1;
2634
2635         printk("\nShowing all locks held in the system:\n");
2636
2637         /*
2638          * Here we try to get the tasklist_lock as hard as possible,
2639          * if not successful after 2 seconds we ignore it (but keep
2640          * trying). This is to enable a debug printout even if a
2641          * tasklist_lock-holding task deadlocks or crashes.
2642          */
2643 retry:
2644         if (!read_trylock(&tasklist_lock)) {
2645                 if (count == 10)
2646                         printk("hm, tasklist_lock locked, retrying... ");
2647                 if (count) {
2648                         count--;
2649                         printk(" #%d", 10-count);
2650                         mdelay(200);
2651                         goto retry;
2652                 }
2653                 printk(" ignoring it.\n");
2654                 unlock = 0;
2655         }
2656         if (count != 10)
2657                 printk(" locked it.\n");
2658
2659         do_each_thread(g, p) {
2660                 if (p->lockdep_depth)
2661                         lockdep_print_held_locks(p);
2662                 if (!unlock)
2663                         if (read_trylock(&tasklist_lock))
2664                                 unlock = 1;
2665         } while_each_thread(g, p);
2666
2667         printk("\n");
2668         printk("=============================================\n\n");
2669
2670         if (unlock)
2671                 read_unlock(&tasklist_lock);
2672 }
2673
2674 EXPORT_SYMBOL_GPL(debug_show_all_locks);
2675
2676 void debug_show_held_locks(struct task_struct *task)
2677 {
2678         lockdep_print_held_locks(task);
2679 }
2680
2681 EXPORT_SYMBOL_GPL(debug_show_held_locks);
2682