lockdep: Print out additional debugging advice when we hit lockdep BUGs
linux-3.10.git / kernel/lockdep.c
1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/delay.h>
32 #include <linux/module.h>
33 #include <linux/proc_fs.h>
34 #include <linux/seq_file.h>
35 #include <linux/spinlock.h>
36 #include <linux/kallsyms.h>
37 #include <linux/interrupt.h>
38 #include <linux/stacktrace.h>
39 #include <linux/debug_locks.h>
40 #include <linux/irqflags.h>
41 #include <linux/utsname.h>
42 #include <linux/hash.h>
43 #include <linux/ftrace.h>
44 #include <linux/stringify.h>
45 #include <linux/bitops.h>
46 #include <linux/gfp.h>
47 #include <linux/kmemcheck.h>
48
49 #include <asm/sections.h>
50
51 #include "lockdep_internals.h"
52
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/lock.h>
55
56 #ifdef CONFIG_PROVE_LOCKING
57 int prove_locking = 1;
58 module_param(prove_locking, int, 0644);
59 #else
60 #define prove_locking 0
61 #endif
62
63 #ifdef CONFIG_LOCK_STAT
64 int lock_stat = 1;
65 module_param(lock_stat, int, 0644);
66 #else
67 #define lock_stat 0
68 #endif
69
70 /*
71  * lockdep_lock: protects the lockdep graph, the hashes and the
72  *               class/list/hash allocators.
73  *
74  * This is one of the rare exceptions where it's justified
75  * to use a raw spinlock - we really don't want the spinlock
76  * code to recurse back into the lockdep code...
77  */
78 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
79
80 static int graph_lock(void)
81 {
82         arch_spin_lock(&lockdep_lock);
83         /*
84          * Make sure that if another CPU detected a bug while
85          * walking the graph we don't change it (while the other
86          * CPU is busy printing out stuff with the graph lock
87          * dropped already)
88          */
89         if (!debug_locks) {
90                 arch_spin_unlock(&lockdep_lock);
91                 return 0;
92         }
93         /* prevent any recursions within lockdep from causing deadlocks */
94         current->lockdep_recursion++;
95         return 1;
96 }
97
98 static inline int graph_unlock(void)
99 {
100         if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
101                 /*
102                  * The lockdep graph lock isn't locked while we expect it to
103                  * be, we're confused now, bye!
104                  */
105                 return DEBUG_LOCKS_WARN_ON(1);
106         }
107
108         current->lockdep_recursion--;
109         arch_spin_unlock(&lockdep_lock);
110         return 0;
111 }
112
113 /*
114  * Turn lock debugging off and return with 0 if it was off already,
115  * and also release the graph lock:
116  */
117 static inline int debug_locks_off_graph_unlock(void)
118 {
119         int ret = debug_locks_off();
120
121         arch_spin_unlock(&lockdep_lock);
122
123         return ret;
124 }
125
126 static int lockdep_initialized;
127
128 unsigned long nr_list_entries;
129 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
130
131 /*
132  * All data structures here are protected by the global debug_lock.
133  *
134  * Mutex key structs only get allocated once, during bootup, and never
135  * get freed - this significantly simplifies the debugging code.
136  */
137 unsigned long nr_lock_classes;
138 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
139
140 static inline struct lock_class *hlock_class(struct held_lock *hlock)
141 {
142         if (!hlock->class_idx) {
143                 /*
144                  * Someone passed in garbage, we give up.
145                  */
146                 DEBUG_LOCKS_WARN_ON(1);
147                 return NULL;
148         }
149         return lock_classes + hlock->class_idx - 1;
150 }
151
152 #ifdef CONFIG_LOCK_STAT
153 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
154                       cpu_lock_stats);
155
156 static inline u64 lockstat_clock(void)
157 {
158         return local_clock();
159 }
160
161 static int lock_point(unsigned long points[], unsigned long ip)
162 {
163         int i;
164
165         for (i = 0; i < LOCKSTAT_POINTS; i++) {
166                 if (points[i] == 0) {
167                         points[i] = ip;
168                         break;
169                 }
170                 if (points[i] == ip)
171                         break;
172         }
173
174         return i;
175 }
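/*
 * I.e. lock_point() returns the index of the slot that already held @ip,
 * or of the first free slot (which it claims for @ip), or LOCKSTAT_POINTS
 * when all slots are occupied by other addresses and @ip could not be
 * recorded.
 */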
176
177 static void lock_time_inc(struct lock_time *lt, u64 time)
178 {
179         if (time > lt->max)
180                 lt->max = time;
181
182         if (time < lt->min || !lt->nr)
183                 lt->min = time;
184
185         lt->total += time;
186         lt->nr++;
187 }
188
189 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
190 {
191         if (!src->nr)
192                 return;
193
194         if (src->max > dst->max)
195                 dst->max = src->max;
196
197         if (src->min < dst->min || !dst->nr)
198                 dst->min = src->min;
199
200         dst->total += src->total;
201         dst->nr += src->nr;
202 }
203
204 struct lock_class_stats lock_stats(struct lock_class *class)
205 {
206         struct lock_class_stats stats;
207         int cpu, i;
208
209         memset(&stats, 0, sizeof(struct lock_class_stats));
210         for_each_possible_cpu(cpu) {
211                 struct lock_class_stats *pcs =
212                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
213
214                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
215                         stats.contention_point[i] += pcs->contention_point[i];
216
217                 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
218                         stats.contending_point[i] += pcs->contending_point[i];
219
220                 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
221                 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
222
223                 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
224                 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
225
226                 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
227                         stats.bounces[i] += pcs->bounces[i];
228         }
229
230         return stats;
231 }
232
233 void clear_lock_stats(struct lock_class *class)
234 {
235         int cpu;
236
237         for_each_possible_cpu(cpu) {
238                 struct lock_class_stats *cpu_stats =
239                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
240
241                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
242         }
243         memset(class->contention_point, 0, sizeof(class->contention_point));
244         memset(class->contending_point, 0, sizeof(class->contending_point));
245 }
246
247 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
248 {
249         return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
250 }
251
252 static void put_lock_stats(struct lock_class_stats *stats)
253 {
254         put_cpu_var(cpu_lock_stats);
255 }
256
257 static void lock_release_holdtime(struct held_lock *hlock)
258 {
259         struct lock_class_stats *stats;
260         u64 holdtime;
261
262         if (!lock_stat)
263                 return;
264
265         holdtime = lockstat_clock() - hlock->holdtime_stamp;
266
267         stats = get_lock_stats(hlock_class(hlock));
268         if (hlock->read)
269                 lock_time_inc(&stats->read_holdtime, holdtime);
270         else
271                 lock_time_inc(&stats->write_holdtime, holdtime);
272         put_lock_stats(stats);
273 }
274 #else
275 static inline void lock_release_holdtime(struct held_lock *hlock)
276 {
277 }
278 #endif
279
280 /*
281  * We keep a global list of all lock classes. The list only grows,
282  * never shrinks. The list is only accessed with the lockdep
283  * spinlock held.
284  */
285 LIST_HEAD(all_lock_classes);
286
287 /*
288  * The lockdep classes are in a hash-table as well, for fast lookup:
289  */
290 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
291 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
292 #define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
293 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
294
295 static struct list_head classhash_table[CLASSHASH_SIZE];
296
297 /*
298  * We put the lock dependency chains into a hash-table as well, to cache
299  * their existence:
300  */
301 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
302 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
303 #define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
304 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
305
306 static struct list_head chainhash_table[CHAINHASH_SIZE];
307
308 /*
309  * The hash key of the lock dependency chains is a hash itself too:
310  * it's a hash of all locks taken up to that lock, including that lock.
311  * It's a 64-bit hash, because it's important for the keys to be
312  * unique.
313  */
314 #define iterate_chain_key(key1, key2) \
315         (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
316         ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
317         (key2))
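/*
 * Since the two shifted halves of key1 do not overlap, this is simply a
 * left-rotation of the previous key by MAX_LOCKDEP_KEYS_BITS bits, XORed
 * with the new value - so the resulting chain key depends on every lock
 * in the chain and on the order in which the locks were taken.
 */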
318
319 void lockdep_off(void)
320 {
321         current->lockdep_recursion++;
322 }
323 EXPORT_SYMBOL(lockdep_off);
324
325 void lockdep_on(void)
326 {
327         current->lockdep_recursion--;
328 }
329 EXPORT_SYMBOL(lockdep_on);
330
331 /*
332  * Debugging switches:
333  */
334
335 #define VERBOSE                 0
336 #define VERY_VERBOSE            0
337
338 #if VERBOSE
339 # define HARDIRQ_VERBOSE        1
340 # define SOFTIRQ_VERBOSE        1
341 # define RECLAIM_VERBOSE        1
342 #else
343 # define HARDIRQ_VERBOSE        0
344 # define SOFTIRQ_VERBOSE        0
345 # define RECLAIM_VERBOSE        0
346 #endif
347
348 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
349 /*
350  * Quick filtering for interesting events:
351  */
352 static int class_filter(struct lock_class *class)
353 {
354 #if 0
355         /* Example */
356         if (class->name_version == 1 &&
357                         !strcmp(class->name, "lockname"))
358                 return 1;
359         if (class->name_version == 1 &&
360                         !strcmp(class->name, "&struct->lockfield"))
361                 return 1;
362 #endif
363         /* Filter everything else. Return 1 to allow everything else. */
364         return 0;
365 }
366 #endif
367
368 static int verbose(struct lock_class *class)
369 {
370 #if VERBOSE
371         return class_filter(class);
372 #endif
373         return 0;
374 }
375
376 /*
377  * Stack-trace: tightly packed array of stack backtrace
378  * addresses. Protected by the graph_lock.
379  */
380 unsigned long nr_stack_trace_entries;
381 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
382
383 static int save_trace(struct stack_trace *trace)
384 {
385         trace->nr_entries = 0;
386         trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
387         trace->entries = stack_trace + nr_stack_trace_entries;
388
389         trace->skip = 3;
390
391         save_stack_trace(trace);
392
393         /*
394          * Some daft arches put -1 at the end to indicate it's a full trace.
395          *
396          * <rant> this is buggy anyway, since it takes a whole extra entry so a
397          * complete trace that maxes out the entries provided will be reported
398          * as incomplete, friggin useless </rant>
399          */
400         if (trace->nr_entries != 0 &&
401             trace->entries[trace->nr_entries-1] == ULONG_MAX)
402                 trace->nr_entries--;
403
404         trace->max_entries = trace->nr_entries;
405
406         nr_stack_trace_entries += trace->nr_entries;
407
408         if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
409                 if (!debug_locks_off_graph_unlock())
410                         return 0;
411
412                 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
413                 printk("turning off the locking correctness validator.\n");
414                 printk("Attach output of /proc/lock_stat to bug report\n");
415                 dump_stack();
416
417                 return 0;
418         }
419
420         return 1;
421 }
422
423 unsigned int nr_hardirq_chains;
424 unsigned int nr_softirq_chains;
425 unsigned int nr_process_chains;
426 unsigned int max_lockdep_depth;
427
428 #ifdef CONFIG_DEBUG_LOCKDEP
429 /*
430  * We cannot printk in early bootup code. Even early_printk()
431  * might not work. So we mark any initialization errors and printk
432  * about it later on, in lockdep_info().
433  */
434 static int lockdep_init_error;
435 static const char *lock_init_error;
436 static unsigned long lockdep_init_trace_data[20];
437 static struct stack_trace lockdep_init_trace = {
438         .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
439         .entries = lockdep_init_trace_data,
440 };
441
442 /*
443  * Various lockdep statistics:
444  */
445 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
446 #endif
447
448 /*
449  * Locking printouts:
450  */
451
452 #define __USAGE(__STATE)                                                \
453         [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
454         [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
455         [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
456         [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
457
458 static const char *usage_str[] =
459 {
460 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
461 #include "lockdep_states.h"
462 #undef LOCKDEP_STATE
463         [LOCK_USED] = "INITIAL USE",
464 };
465
466 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
467 {
468         return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
469 }
470
471 static inline unsigned long lock_flag(enum lock_usage_bit bit)
472 {
473         return 1UL << bit;
474 }
475
476 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
477 {
478         char c = '.';
479
480         if (class->usage_mask & lock_flag(bit + 2))
481                 c = '+';
482         if (class->usage_mask & lock_flag(bit)) {
483                 c = '-';
484                 if (class->usage_mask & lock_flag(bit + 2))
485                         c = '?';
486         }
487
488         return c;
489 }
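/*
 * The resulting character for a given usage bit pair is:
 *
 *   '.'  the class was never used in this irq context and never
 *        acquired with this irq type enabled
 *   '+'  acquired with this irq type enabled (LOCK_ENABLED_*)
 *   '-'  used in this irq context (LOCK_USED_IN_*)
 *   '?'  both of the above
 */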
490
491 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
492 {
493         int i = 0;
494
495 #define LOCKDEP_STATE(__STATE)                                          \
496         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
497         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
498 #include "lockdep_states.h"
499 #undef LOCKDEP_STATE
500
501         usage[i] = '\0';
502 }
503
504 static void __print_lock_name(struct lock_class *class)
505 {
506         char str[KSYM_NAME_LEN];
507         const char *name;
508
509         name = class->name;
510         if (!name) {
511                 name = __get_key_name(class->key, str);
512                 printk("%s", name);
513         } else {
514                 printk("%s", name);
515                 if (class->name_version > 1)
516                         printk("#%d", class->name_version);
517                 if (class->subclass)
518                         printk("/%d", class->subclass);
519         }
520 }
521
522 static void print_lock_name(struct lock_class *class)
523 {
524         char usage[LOCK_USAGE_CHARS];
525
526         get_usage_chars(class, usage);
527
528         printk(" (");
529         __print_lock_name(class);
530         printk("){%s}", usage);
531 }
532
533 static void print_lockdep_cache(struct lockdep_map *lock)
534 {
535         const char *name;
536         char str[KSYM_NAME_LEN];
537
538         name = lock->name;
539         if (!name)
540                 name = __get_key_name(lock->key->subkeys, str);
541
542         printk("%s", name);
543 }
544
545 static void print_lock(struct held_lock *hlock)
546 {
547         print_lock_name(hlock_class(hlock));
548         printk(", at: ");
549         print_ip_sym(hlock->acquire_ip);
550 }
551
552 static void lockdep_print_held_locks(struct task_struct *curr)
553 {
554         int i, depth = curr->lockdep_depth;
555
556         if (!depth) {
557                 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
558                 return;
559         }
560         printk("%d lock%s held by %s/%d:\n",
561                 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
562
563         for (i = 0; i < depth; i++) {
564                 printk(" #%d: ", i);
565                 print_lock(curr->held_locks + i);
566         }
567 }
568
569 static void print_kernel_ident(void)
570 {
571         printk("%s %.*s %s\n", init_utsname()->release,
572                 (int)strcspn(init_utsname()->version, " "),
573                 init_utsname()->version,
574                 print_tainted());
575 }
576
577 static int very_verbose(struct lock_class *class)
578 {
579 #if VERY_VERBOSE
580         return class_filter(class);
581 #endif
582         return 0;
583 }
584
585 /*
586  * Is this the address of a static object:
587  */
588 static int static_obj(void *obj)
589 {
590         unsigned long start = (unsigned long) &_stext,
591                       end   = (unsigned long) &_end,
592                       addr  = (unsigned long) obj;
593
594         /*
595          * static variable?
596          */
597         if ((addr >= start) && (addr < end))
598                 return 1;
599
600         if (arch_is_kernel_data(addr))
601                 return 1;
602
603         /*
604          * in-kernel percpu var?
605          */
606         if (is_kernel_percpu_address(addr))
607                 return 1;
608
609         /*
610          * module static or percpu var?
611          */
612         return is_module_address(addr) || is_module_percpu_address(addr);
613 }
614
615 /*
616  * To make lock name printouts unique, we calculate a per-name
617  * class->name_version generation counter:
618  */
619 static int count_matching_names(struct lock_class *new_class)
620 {
621         struct lock_class *class;
622         int count = 0;
623
624         if (!new_class->name)
625                 return 0;
626
627         list_for_each_entry(class, &all_lock_classes, lock_entry) {
628                 if (new_class->key - new_class->subclass == class->key)
629                         return class->name_version;
630                 if (class->name && !strcmp(class->name, new_class->name))
631                         count = max(count, class->name_version);
632         }
633
634         return count + 1;
635 }
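/*
 * So the first class registered under a given name gets name_version 1
 * and is printed as plain "name"; a second, distinct class that happens
 * to share the name gets name_version 2 and is printed as "name#2" by
 * __print_lock_name(), and so on.
 */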
636
637 /*
638  * Register a lock's class in the hash-table, if the class is not present
639  * yet. Otherwise we look it up. We cache the result in the lock object
640  * itself, so actual lookup of the hash should be once per lock object.
641  */
642 static inline struct lock_class *
643 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
644 {
645         struct lockdep_subclass_key *key;
646         struct list_head *hash_head;
647         struct lock_class *class;
648
649 #ifdef CONFIG_DEBUG_LOCKDEP
650         /*
651          * If the architecture calls into lockdep before initializing
652          * the hashes then we'll warn about it later. (we cannot printk
653          * right now)
654          */
655         if (unlikely(!lockdep_initialized)) {
656                 lockdep_init();
657                 lockdep_init_error = 1;
658                 lock_init_error = lock->name;
659                 save_stack_trace(&lockdep_init_trace);
660         }
661 #endif
662
663         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
664                 debug_locks_off();
665                 printk(KERN_ERR
666                         "BUG: looking up invalid subclass: %u\n", subclass);
667                 printk(KERN_ERR
668                         "turning off the locking correctness validator.\n");
669                 dump_stack();
670                 return NULL;
671         }
672
673         /*
674          * Static locks do not have their class-keys yet - for them the key
675          * is the lock object itself:
676          */
677         if (unlikely(!lock->key))
678                 lock->key = (void *)lock;
679
680         /*
681          * NOTE: the class-key must be unique. For dynamic locks, a static
682          * lock_class_key variable is passed in through the mutex_init()
683          * (or spin_lock_init()) call - which acts as the key. For static
684          * locks we use the lock object itself as the key.
685          */
686         BUILD_BUG_ON(sizeof(struct lock_class_key) >
687                         sizeof(struct lockdep_map));
688
689         key = lock->key->subkeys + subclass;
690
691         hash_head = classhashentry(key);
692
693         /*
694          * We can walk the hash lockfree, because the hash only
695          * grows, and we are careful when adding entries to the end:
696          */
697         list_for_each_entry(class, hash_head, hash_entry) {
698                 if (class->key == key) {
699                         /*
700                          * Huh! same key, different name? Did someone trample
701                          * on some memory? We're most confused.
702                          */
703                         WARN_ON_ONCE(class->name != lock->name);
704                         return class;
705                 }
706         }
707
708         return NULL;
709 }
710
711 /*
712  * Register a lock's class in the hash-table, if the class is not present
713  * yet. Otherwise we look it up. We cache the result in the lock object
714  * itself, so actual lookup of the hash should be once per lock object.
715  */
716 static inline struct lock_class *
717 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
718 {
719         struct lockdep_subclass_key *key;
720         struct list_head *hash_head;
721         struct lock_class *class;
722         unsigned long flags;
723
724         class = look_up_lock_class(lock, subclass);
725         if (likely(class))
726                 goto out_set_class_cache;
727
728         /*
729          * Debug-check: all keys must be persistent!
730          */
731         if (!static_obj(lock->key)) {
732                 debug_locks_off();
733                 printk("INFO: trying to register non-static key.\n");
734                 printk("the code is fine but needs lockdep annotation.\n");
735                 printk("turning off the locking correctness validator.\n");
736                 dump_stack();
737
738                 return NULL;
739         }
740
741         key = lock->key->subkeys + subclass;
742         hash_head = classhashentry(key);
743
744         raw_local_irq_save(flags);
745         if (!graph_lock()) {
746                 raw_local_irq_restore(flags);
747                 return NULL;
748         }
749         /*
750          * We have to do the hash-walk again, to avoid races
751          * with another CPU:
752          */
753         list_for_each_entry(class, hash_head, hash_entry)
754                 if (class->key == key)
755                         goto out_unlock_set;
756         /*
757          * Allocate a new key from the static array, and add it to
758          * the hash:
759          */
760         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
761                 if (!debug_locks_off_graph_unlock()) {
762                         raw_local_irq_restore(flags);
763                         return NULL;
764                 }
765                 raw_local_irq_restore(flags);
766
767                 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
768                 printk("turning off the locking correctness validator.\n");
769                 printk("Attach output of /proc/lock_stat to bug report\n");
770                 dump_stack();
771                 return NULL;
772         }
773         class = lock_classes + nr_lock_classes++;
774         debug_atomic_inc(nr_unused_locks);
775         class->key = key;
776         class->name = lock->name;
777         class->subclass = subclass;
778         INIT_LIST_HEAD(&class->lock_entry);
779         INIT_LIST_HEAD(&class->locks_before);
780         INIT_LIST_HEAD(&class->locks_after);
781         class->name_version = count_matching_names(class);
782         /*
783          * We use RCU's safe list-add method to make
784          * parallel walking of the hash-list safe:
785          */
786         list_add_tail_rcu(&class->hash_entry, hash_head);
787         /*
788          * Add it to the global list of classes:
789          */
790         list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
791
792         if (verbose(class)) {
793                 graph_unlock();
794                 raw_local_irq_restore(flags);
795
796                 printk("\nnew class %p: %s", class->key, class->name);
797                 if (class->name_version > 1)
798                         printk("#%d", class->name_version);
799                 printk("\n");
800                 dump_stack();
801
802                 raw_local_irq_save(flags);
803                 if (!graph_lock()) {
804                         raw_local_irq_restore(flags);
805                         return NULL;
806                 }
807         }
808 out_unlock_set:
809         graph_unlock();
810         raw_local_irq_restore(flags);
811
812 out_set_class_cache:
813         if (!subclass || force)
814                 lock->class_cache[0] = class;
815         else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
816                 lock->class_cache[subclass] = class;
817
818         /*
819          * Hash collision, did we smoke some? We found a class with a matching
820          * hash but the subclass -- which is hashed in -- didn't match.
821          */
822         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
823                 return NULL;
824
825         return class;
826 }
827
828 #ifdef CONFIG_PROVE_LOCKING
829 /*
830  * Allocate a lockdep entry. (assumes the graph_lock is held, returns
831  * with NULL on failure)
832  */
833 static struct lock_list *alloc_list_entry(void)
834 {
835         if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
836                 if (!debug_locks_off_graph_unlock())
837                         return NULL;
838
839                 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
840                 printk("turning off the locking correctness validator.\n");
841                 printk("Attach output of /proc/lock_stat to bug report\n");
842                 dump_stack();
843                 return NULL;
844         }
845         return list_entries + nr_list_entries++;
846 }
847
848 /*
849  * Add a new dependency to the head of the list:
850  */
851 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
852                             struct list_head *head, unsigned long ip,
853                             int distance, struct stack_trace *trace)
854 {
855         struct lock_list *entry;
856         /*
857          * Lock not present yet - get a new dependency struct and
858          * add it to the list:
859          */
860         entry = alloc_list_entry();
861         if (!entry)
862                 return 0;
863
864         entry->class = this;
865         entry->distance = distance;
866         entry->trace = *trace;
867         /*
868          * Since we never remove from the dependency list, the list can
869          * be walked lockless by other CPUs; it's only allocation
870          * that must be protected by the spinlock. But this also means
871          * we must make new entries visible only once writes to the
872          * entry become visible - hence the RCU op:
873          */
874         list_add_tail_rcu(&entry->entry, head);
875
876         return 1;
877 }
878
879 /*
880  * For good efficiency of the modulo operation, we use a power of 2
881  */
882 #define MAX_CIRCULAR_QUEUE_SIZE         4096UL
883 #define CQ_MASK                         (MAX_CIRCULAR_QUEUE_SIZE-1)
884
885 /*
886  * The circular_queue and helpers are used to implement the
887  * breadth-first search (BFS) algorithm, by which we can build
888  * the shortest path from the next lock to be acquired to a
889  * previously held lock, if there is a circular dependency between them.
890  */
891 struct circular_queue {
892         unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
893         unsigned int  front, rear;
894 };
895
896 static struct circular_queue lock_cq;
897
898 unsigned int max_bfs_queue_depth;
899
900 static unsigned int lockdep_dependency_gen_id;
901
902 static inline void __cq_init(struct circular_queue *cq)
903 {
904         cq->front = cq->rear = 0;
905         lockdep_dependency_gen_id++;
906 }
907
908 static inline int __cq_empty(struct circular_queue *cq)
909 {
910         return (cq->front == cq->rear);
911 }
912
913 static inline int __cq_full(struct circular_queue *cq)
914 {
915         return ((cq->rear + 1) & CQ_MASK) == cq->front;
916 }
917
918 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
919 {
920         if (__cq_full(cq))
921                 return -1;
922
923         cq->element[cq->rear] = elem;
924         cq->rear = (cq->rear + 1) & CQ_MASK;
925         return 0;
926 }
927
928 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
929 {
930         if (__cq_empty(cq))
931                 return -1;
932
933         *elem = cq->element[cq->front];
934         cq->front = (cq->front + 1) & CQ_MASK;
935         return 0;
936 }
937
938 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
939 {
940         return (cq->rear - cq->front) & CQ_MASK;
941 }
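/*
 * The queue is a simple power-of-2 sized ring buffer: __cq_enqueue()
 * advances 'rear' and __cq_dequeue() advances 'front', both wrapped via
 * CQ_MASK.  One slot is always left unused so that a full queue
 * (((rear + 1) & CQ_MASK) == front) can be told apart from an empty one
 * (front == rear), i.e. at most MAX_CIRCULAR_QUEUE_SIZE - 1 elements can
 * be queued at once.
 */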
942
943 static inline void mark_lock_accessed(struct lock_list *lock,
944                                         struct lock_list *parent)
945 {
946         unsigned long nr;
947
948         nr = lock - list_entries;
949         WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
950         lock->parent = parent;
951         lock->class->dep_gen_id = lockdep_dependency_gen_id;
952 }
953
954 static inline unsigned long lock_accessed(struct lock_list *lock)
955 {
956         unsigned long nr;
957
958         nr = lock - list_entries;
959         WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
960         return lock->class->dep_gen_id == lockdep_dependency_gen_id;
961 }
962
963 static inline struct lock_list *get_lock_parent(struct lock_list *child)
964 {
965         return child->parent;
966 }
967
968 static inline int get_lock_depth(struct lock_list *child)
969 {
970         int depth = 0;
971         struct lock_list *parent;
972
973         while ((parent = get_lock_parent(child))) {
974                 child = parent;
975                 depth++;
976         }
977         return depth;
978 }
979
980 static int __bfs(struct lock_list *source_entry,
981                  void *data,
982                  int (*match)(struct lock_list *entry, void *data),
983                  struct lock_list **target_entry,
984                  int forward)
985 {
986         struct lock_list *entry;
987         struct list_head *head;
988         struct circular_queue *cq = &lock_cq;
989         int ret = 1;
990
991         if (match(source_entry, data)) {
992                 *target_entry = source_entry;
993                 ret = 0;
994                 goto exit;
995         }
996
997         if (forward)
998                 head = &source_entry->class->locks_after;
999         else
1000                 head = &source_entry->class->locks_before;
1001
1002         if (list_empty(head))
1003                 goto exit;
1004
1005         __cq_init(cq);
1006         __cq_enqueue(cq, (unsigned long)source_entry);
1007
1008         while (!__cq_empty(cq)) {
1009                 struct lock_list *lock;
1010
1011                 __cq_dequeue(cq, (unsigned long *)&lock);
1012
1013                 if (!lock->class) {
1014                         ret = -2;
1015                         goto exit;
1016                 }
1017
1018                 if (forward)
1019                         head = &lock->class->locks_after;
1020                 else
1021                         head = &lock->class->locks_before;
1022
1023                 list_for_each_entry(entry, head, entry) {
1024                         if (!lock_accessed(entry)) {
1025                                 unsigned int cq_depth;
1026                                 mark_lock_accessed(entry, lock);
1027                                 if (match(entry, data)) {
1028                                         *target_entry = entry;
1029                                         ret = 0;
1030                                         goto exit;
1031                                 }
1032
1033                                 if (__cq_enqueue(cq, (unsigned long)entry)) {
1034                                         ret = -1;
1035                                         goto exit;
1036                                 }
1037                                 cq_depth = __cq_get_elem_count(cq);
1038                                 if (max_bfs_queue_depth < cq_depth)
1039                                         max_bfs_queue_depth = cq_depth;
1040                         }
1041                 }
1042         }
1043 exit:
1044         return ret;
1045 }
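/*
 * __bfs() return values:
 *
 *    0  a matching entry was found, *target_entry points to it
 *    1  the whole subgraph was searched and no entry matched
 *   -1  the bfs queue overflowed, the search was aborted
 *   -2  a queued entry had a NULL class, i.e. the graph is corrupted
 */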
1046
1047 static inline int __bfs_forwards(struct lock_list *src_entry,
1048                         void *data,
1049                         int (*match)(struct lock_list *entry, void *data),
1050                         struct lock_list **target_entry)
1051 {
1052         return __bfs(src_entry, data, match, target_entry, 1);
1053
1054 }
1055
1056 static inline int __bfs_backwards(struct lock_list *src_entry,
1057                         void *data,
1058                         int (*match)(struct lock_list *entry, void *data),
1059                         struct lock_list **target_entry)
1060 {
1061         return __bfs(src_entry, data, match, target_entry, 0);
1062
1063 }
1064
1065 /*
1066  * Forwards-direction lock-dependency checking, used for
1067  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1068  * checking.
1069  */
1070
1071 /*
1072  * Print a dependency chain entry (this is only done when a deadlock
1073  * has been detected):
1074  */
1075 static noinline int
1076 print_circular_bug_entry(struct lock_list *target, int depth)
1077 {
1078         if (debug_locks_silent)
1079                 return 0;
1080         printk("\n-> #%u", depth);
1081         print_lock_name(target->class);
1082         printk(":\n");
1083         print_stack_trace(&target->trace, 6);
1084
1085         return 0;
1086 }
1087
1088 static void
1089 print_circular_lock_scenario(struct held_lock *src,
1090                              struct held_lock *tgt,
1091                              struct lock_list *prt)
1092 {
1093         struct lock_class *source = hlock_class(src);
1094         struct lock_class *target = hlock_class(tgt);
1095         struct lock_class *parent = prt->class;
1096
1097         /*
1098          * In a direct locking problem, where the unsafe_class lock is
1099          * taken directly under the safe_class lock, all we need to show
1100          * is the deadlock scenario, as it is obvious that the
1101          * unsafe lock is taken under the safe lock.
1102          *
1103          * But if there is a chain instead, where the safe lock takes
1104          * an intermediate lock (middle_class) that is not the same as
1105          * the safe lock, then the lock chain is used to describe the
1106          * problem. Otherwise we would need to show a different CPU
1107          * case for each link in the chain from the safe_class lock
1108          * to the unsafe_class lock.
1109          */
1110         if (parent != source) {
1111                 printk("Chain exists of:\n  ");
1112                 __print_lock_name(source);
1113                 printk(" --> ");
1114                 __print_lock_name(parent);
1115                 printk(" --> ");
1116                 __print_lock_name(target);
1117                 printk("\n\n");
1118         }
1119
1120         printk(" Possible unsafe locking scenario:\n\n");
1121         printk("       CPU0                    CPU1\n");
1122         printk("       ----                    ----\n");
1123         printk("  lock(");
1124         __print_lock_name(target);
1125         printk(");\n");
1126         printk("                               lock(");
1127         __print_lock_name(parent);
1128         printk(");\n");
1129         printk("                               lock(");
1130         __print_lock_name(target);
1131         printk(");\n");
1132         printk("  lock(");
1133         __print_lock_name(source);
1134         printk(");\n");
1135         printk("\n *** DEADLOCK ***\n\n");
1136 }
1137
1138 /*
1139  * When a circular dependency is detected, print the
1140  * header first:
1141  */
1142 static noinline int
1143 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1144                         struct held_lock *check_src,
1145                         struct held_lock *check_tgt)
1146 {
1147         struct task_struct *curr = current;
1148
1149         if (debug_locks_silent)
1150                 return 0;
1151
1152         printk("\n");
1153         printk("======================================================\n");
1154         printk("[ INFO: possible circular locking dependency detected ]\n");
1155         print_kernel_ident();
1156         printk("-------------------------------------------------------\n");
1157         printk("%s/%d is trying to acquire lock:\n",
1158                 curr->comm, task_pid_nr(curr));
1159         print_lock(check_src);
1160         printk("\nbut task is already holding lock:\n");
1161         print_lock(check_tgt);
1162         printk("\nwhich lock already depends on the new lock.\n\n");
1163         printk("\nthe existing dependency chain (in reverse order) is:\n");
1164
1165         print_circular_bug_entry(entry, depth);
1166
1167         return 0;
1168 }
1169
1170 static inline int class_equal(struct lock_list *entry, void *data)
1171 {
1172         return entry->class == data;
1173 }
1174
1175 static noinline int print_circular_bug(struct lock_list *this,
1176                                 struct lock_list *target,
1177                                 struct held_lock *check_src,
1178                                 struct held_lock *check_tgt)
1179 {
1180         struct task_struct *curr = current;
1181         struct lock_list *parent;
1182         struct lock_list *first_parent;
1183         int depth;
1184
1185         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1186                 return 0;
1187
1188         if (!save_trace(&this->trace))
1189                 return 0;
1190
1191         depth = get_lock_depth(target);
1192
1193         print_circular_bug_header(target, depth, check_src, check_tgt);
1194
1195         parent = get_lock_parent(target);
1196         first_parent = parent;
1197
1198         while (parent) {
1199                 print_circular_bug_entry(parent, --depth);
1200                 parent = get_lock_parent(parent);
1201         }
1202
1203         printk("\nother info that might help us debug this:\n\n");
1204         print_circular_lock_scenario(check_src, check_tgt,
1205                                      first_parent);
1206
1207         lockdep_print_held_locks(curr);
1208
1209         printk("\nstack backtrace:\n");
1210         dump_stack();
1211
1212         return 0;
1213 }
1214
1215 static noinline int print_bfs_bug(int ret)
1216 {
1217         if (!debug_locks_off_graph_unlock())
1218                 return 0;
1219
1220         /*
1221          * Breadth-first-search failed, graph got corrupted?
1222          */
1223         WARN(1, "lockdep bfs error:%d\n", ret);
1224
1225         return 0;
1226 }
1227
1228 static int noop_count(struct lock_list *entry, void *data)
1229 {
1230         (*(unsigned long *)data)++;
1231         return 0;
1232 }
1233
1234 unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1235 {
1236         unsigned long  count = 0;
1237         struct lock_list *uninitialized_var(target_entry);
1238
1239         __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1240
1241         return count;
1242 }
1243 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1244 {
1245         unsigned long ret, flags;
1246         struct lock_list this;
1247
1248         this.parent = NULL;
1249         this.class = class;
1250
1251         local_irq_save(flags);
1252         arch_spin_lock(&lockdep_lock);
1253         ret = __lockdep_count_forward_deps(&this);
1254         arch_spin_unlock(&lockdep_lock);
1255         local_irq_restore(flags);
1256
1257         return ret;
1258 }
1259
1260 unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1261 {
1262         unsigned long  count = 0;
1263         struct lock_list *uninitialized_var(target_entry);
1264
1265         __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1266
1267         return count;
1268 }
1269
1270 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1271 {
1272         unsigned long ret, flags;
1273         struct lock_list this;
1274
1275         this.parent = NULL;
1276         this.class = class;
1277
1278         local_irq_save(flags);
1279         arch_spin_lock(&lockdep_lock);
1280         ret = __lockdep_count_backward_deps(&this);
1281         arch_spin_unlock(&lockdep_lock);
1282         local_irq_restore(flags);
1283
1284         return ret;
1285 }
1286
1287 /*
1288  * Prove that the dependency graph starting at <root> cannot
1289  * lead to <target>. Print an error and return 0 if it does.
1290  */
1291 static noinline int
1292 check_noncircular(struct lock_list *root, struct lock_class *target,
1293                 struct lock_list **target_entry)
1294 {
1295         int result;
1296
1297         debug_atomic_inc(nr_cyclic_checks);
1298
1299         result = __bfs_forwards(root, target, class_equal, target_entry);
1300
1301         return result;
1302 }
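/*
 * I.e. check_noncircular() returns 0 (and sets *target_entry) if a
 * forward path from @root to @target exists, meaning the dependency
 * about to be added would close a cycle; 1 if no such path exists; and
 * a negative value if __bfs() itself failed.
 */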
1303
1304 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1305 /*
1306  * Forwards and backwards subgraph searching, for the purposes of
1307  * proving that two subgraphs can be connected by a new dependency
1308  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1309  */
1310
1311 static inline int usage_match(struct lock_list *entry, void *bit)
1312 {
1313         return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1314 }
1315
1316
1317
1318 /*
1319  * Find a node in the forwards-direction dependency sub-graph starting
1320  * at @root->class that matches @bit.
1321  *
1322  * Return 0 if such a node exists in the subgraph, and put that node
1323  * into *@target_entry.
1324  *
1325  * Return 1 otherwise and keep *@target_entry unchanged.
1326  * Return <0 on error.
1327  */
1328 static int
1329 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1330                         struct lock_list **target_entry)
1331 {
1332         int result;
1333
1334         debug_atomic_inc(nr_find_usage_forwards_checks);
1335
1336         result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1337
1338         return result;
1339 }
1340
1341 /*
1342  * Find a node in the backwards-direction dependency sub-graph starting
1343  * at @root->class that matches @bit.
1344  *
1345  * Return 0 if such a node exists in the subgraph, and put that node
1346  * into *@target_entry.
1347  *
1348  * Return 1 otherwise and keep *@target_entry unchanged.
1349  * Return <0 on error.
1350  */
1351 static int
1352 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1353                         struct lock_list **target_entry)
1354 {
1355         int result;
1356
1357         debug_atomic_inc(nr_find_usage_backwards_checks);
1358
1359         result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1360
1361         return result;
1362 }
1363
1364 static void print_lock_class_header(struct lock_class *class, int depth)
1365 {
1366         int bit;
1367
1368         printk("%*s->", depth, "");
1369         print_lock_name(class);
1370         printk(" ops: %lu", class->ops);
1371         printk(" {\n");
1372
1373         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1374                 if (class->usage_mask & (1 << bit)) {
1375                         int len = depth;
1376
1377                         len += printk("%*s   %s", depth, "", usage_str[bit]);
1378                         len += printk(" at:\n");
1379                         print_stack_trace(class->usage_traces + bit, len);
1380                 }
1381         }
1382         printk("%*s }\n", depth, "");
1383
1384         printk("%*s ... key      at: ",depth,"");
1385         print_ip_sym((unsigned long)class->key);
1386 }
1387
1388 /*
1389  * printk the shortest lock dependencies from @leaf to @root in reverse order:
1390  */
1391 static void __used
1392 print_shortest_lock_dependencies(struct lock_list *leaf,
1393                                 struct lock_list *root)
1394 {
1395         struct lock_list *entry = leaf;
1396         int depth;
1397
1398         /* compute the depth from the tree generated by BFS */
1399         depth = get_lock_depth(leaf);
1400
1401         do {
1402                 print_lock_class_header(entry->class, depth);
1403                 printk("%*s ... acquired at:\n", depth, "");
1404                 print_stack_trace(&entry->trace, 2);
1405                 printk("\n");
1406
1407                 if (depth == 0 && (entry != root)) {
1408                         printk("lockdep:%s bad path found in chain graph\n", __func__);
1409                         break;
1410                 }
1411
1412                 entry = get_lock_parent(entry);
1413                 depth--;
1414         } while (entry && (depth >= 0));
1415
1416         return;
1417 }
1418
1419 static void
1420 print_irq_lock_scenario(struct lock_list *safe_entry,
1421                         struct lock_list *unsafe_entry,
1422                         struct lock_class *prev_class,
1423                         struct lock_class *next_class)
1424 {
1425         struct lock_class *safe_class = safe_entry->class;
1426         struct lock_class *unsafe_class = unsafe_entry->class;
1427         struct lock_class *middle_class = prev_class;
1428
1429         if (middle_class == safe_class)
1430                 middle_class = next_class;
1431
1432         /*
1433          * In a direct locking problem, where the unsafe_class lock is
1434          * taken directly under the safe_class lock, all we need to show
1435          * is the deadlock scenario, as it is obvious that the
1436          * unsafe lock is taken under the safe lock.
1437          *
1438          * But if there is a chain instead, where the safe lock takes
1439          * an intermediate lock (middle_class) that is not the same as
1440          * the safe lock, then the lock chain is used to describe the
1441          * problem. Otherwise we would need to show a different CPU
1442          * case for each link in the chain from the safe_class lock
1443          * to the unsafe_class lock.
1444          */
1445         if (middle_class != unsafe_class) {
1446                 printk("Chain exists of:\n  ");
1447                 __print_lock_name(safe_class);
1448                 printk(" --> ");
1449                 __print_lock_name(middle_class);
1450                 printk(" --> ");
1451                 __print_lock_name(unsafe_class);
1452                 printk("\n\n");
1453         }
1454
1455         printk(" Possible interrupt unsafe locking scenario:\n\n");
1456         printk("       CPU0                    CPU1\n");
1457         printk("       ----                    ----\n");
1458         printk("  lock(");
1459         __print_lock_name(unsafe_class);
1460         printk(");\n");
1461         printk("                               local_irq_disable();\n");
1462         printk("                               lock(");
1463         __print_lock_name(safe_class);
1464         printk(");\n");
1465         printk("                               lock(");
1466         __print_lock_name(middle_class);
1467         printk(");\n");
1468         printk("  <Interrupt>\n");
1469         printk("    lock(");
1470         __print_lock_name(safe_class);
1471         printk(");\n");
1472         printk("\n *** DEADLOCK ***\n\n");
1473 }
1474
1475 static int
1476 print_bad_irq_dependency(struct task_struct *curr,
1477                          struct lock_list *prev_root,
1478                          struct lock_list *next_root,
1479                          struct lock_list *backwards_entry,
1480                          struct lock_list *forwards_entry,
1481                          struct held_lock *prev,
1482                          struct held_lock *next,
1483                          enum lock_usage_bit bit1,
1484                          enum lock_usage_bit bit2,
1485                          const char *irqclass)
1486 {
1487         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1488                 return 0;
1489
1490         printk("\n");
1491         printk("======================================================\n");
1492         printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1493                 irqclass, irqclass);
1494         print_kernel_ident();
1495         printk("------------------------------------------------------\n");
1496         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1497                 curr->comm, task_pid_nr(curr),
1498                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1499                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1500                 curr->hardirqs_enabled,
1501                 curr->softirqs_enabled);
1502         print_lock(next);
1503
1504         printk("\nand this task is already holding:\n");
1505         print_lock(prev);
1506         printk("which would create a new lock dependency:\n");
1507         print_lock_name(hlock_class(prev));
1508         printk(" ->");
1509         print_lock_name(hlock_class(next));
1510         printk("\n");
1511
1512         printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1513                 irqclass);
1514         print_lock_name(backwards_entry->class);
1515         printk("\n... which became %s-irq-safe at:\n", irqclass);
1516
1517         print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1518
1519         printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1520         print_lock_name(forwards_entry->class);
1521         printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1522         printk("...");
1523
1524         print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1525
1526         printk("\nother info that might help us debug this:\n\n");
1527         print_irq_lock_scenario(backwards_entry, forwards_entry,
1528                                 hlock_class(prev), hlock_class(next));
1529
1530         lockdep_print_held_locks(curr);
1531
1532         printk("\nthe dependencies between %s-irq-safe lock", irqclass);
1533         printk(" and the holding lock:\n");
1534         if (!save_trace(&prev_root->trace))
1535                 return 0;
1536         print_shortest_lock_dependencies(backwards_entry, prev_root);
1537
1538         printk("\nthe dependencies between the lock to be acquired");
1539         printk(" and %s-irq-unsafe lock:\n", irqclass);
1540         if (!save_trace(&next_root->trace))
1541                 return 0;
1542         print_shortest_lock_dependencies(forwards_entry, next_root);
1543
1544         printk("\nstack backtrace:\n");
1545         dump_stack();
1546
1547         return 0;
1548 }
1549
1550 static int
1551 check_usage(struct task_struct *curr, struct held_lock *prev,
1552             struct held_lock *next, enum lock_usage_bit bit_backwards,
1553             enum lock_usage_bit bit_forwards, const char *irqclass)
1554 {
1555         int ret;
1556         struct lock_list this, that;
1557         struct lock_list *uninitialized_var(target_entry);
1558         struct lock_list *uninitialized_var(target_entry1);
1559
1560         this.parent = NULL;
1561
1562         this.class = hlock_class(prev);
1563         ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1564         if (ret < 0)
1565                 return print_bfs_bug(ret);
1566         if (ret == 1)
1567                 return ret;
1568
1569         that.parent = NULL;
1570         that.class = hlock_class(next);
1571         ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1572         if (ret < 0)
1573                 return print_bfs_bug(ret);
1574         if (ret == 1)
1575                 return ret;
1576
1577         return print_bad_irq_dependency(curr, &this, &that,
1578                         target_entry, target_entry1,
1579                         prev, next,
1580                         bit_backwards, bit_forwards, irqclass);
1581 }
1582
1583 static const char *state_names[] = {
1584 #define LOCKDEP_STATE(__STATE) \
1585         __stringify(__STATE),
1586 #include "lockdep_states.h"
1587 #undef LOCKDEP_STATE
1588 };
1589
1590 static const char *state_rnames[] = {
1591 #define LOCKDEP_STATE(__STATE) \
1592         __stringify(__STATE)"-READ",
1593 #include "lockdep_states.h"
1594 #undef LOCKDEP_STATE
1595 };
1596
1597 static inline const char *state_name(enum lock_usage_bit bit)
1598 {
1599         return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1600 }
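/*
 * I.e. bit 0 selects the "-READ" variant of the name and bits 2 and up
 * select the state (HARDIRQ, SOFTIRQ, ...), matching the usage-bit
 * layout documented in exclusive_bit() below.
 */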
1601
1602 static int exclusive_bit(int new_bit)
1603 {
1604         /*
1605          * USED_IN
1606          * USED_IN_READ
1607          * ENABLED
1608          * ENABLED_READ
1609          *
1610          * bit 0 - write/read
1611          * bit 1 - used_in/enabled
1612          * bit 2+  state
1613          */
1614
1615         int state = new_bit & ~3;
1616         int dir = new_bit & 2;
1617
1618         /*
1619          * keep the state, flip the direction bit and strip the read bit.
1620          */
1621         return state | (dir ^ 2);
1622 }
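/*
 * With the bit layout above this gives, for any state FOO:
 *
 *   exclusive_bit(LOCK_USED_IN_FOO)      == LOCK_ENABLED_FOO
 *   exclusive_bit(LOCK_USED_IN_FOO_READ) == LOCK_ENABLED_FOO
 *   exclusive_bit(LOCK_ENABLED_FOO)      == LOCK_USED_IN_FOO
 *   exclusive_bit(LOCK_ENABLED_FOO_READ) == LOCK_USED_IN_FOO
 *
 * i.e. the exclusive counterpart of a usage bit is the write variant
 * with the used_in/enabled direction flipped.
 */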
1623
1624 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1625                            struct held_lock *next, enum lock_usage_bit bit)
1626 {
1627         /*
1628          * Prove that the new dependency does not connect a hardirq-safe
1629          * lock with a hardirq-unsafe lock - to achieve this we search
1630          * the backwards-subgraph starting at <prev>, and the
1631          * forwards-subgraph starting at <next>:
1632          */
1633         if (!check_usage(curr, prev, next, bit,
1634                            exclusive_bit(bit), state_name(bit)))
1635                 return 0;
1636
1637         bit++; /* _READ */
1638
1639         /*
1640          * Prove that the new dependency does not connect a hardirq-safe-read
1641          * lock with a hardirq-unsafe lock - to achieve this we search
1642          * the backwards-subgraph starting at <prev>, and the
1643          * forwards-subgraph starting at <next>:
1644          */
1645         if (!check_usage(curr, prev, next, bit,
1646                            exclusive_bit(bit), state_name(bit)))
1647                 return 0;
1648
1649         return 1;
1650 }
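
/*
 * Example for bit == LOCK_USED_IN_HARDIRQ (sketch): the first pass above
 * asks "is there a hardirq-safe lock behind <prev> and a hardirq-unsafe
 * lock ahead of <next>?"; the second pass, after bit++ selects the _READ
 * variant, repeats the question for hardirq-safe-read locks behind
 * <prev>.
 */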
1651
1652 static int
1653 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1654                 struct held_lock *next)
1655 {
1656 #define LOCKDEP_STATE(__STATE)                                          \
1657         if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1658                 return 0;
1659 #include "lockdep_states.h"
1660 #undef LOCKDEP_STATE
1661
1662         return 1;
1663 }
1664
1665 static void inc_chains(void)
1666 {
1667         if (current->hardirq_context)
1668                 nr_hardirq_chains++;
1669         else {
1670                 if (current->softirq_context)
1671                         nr_softirq_chains++;
1672                 else
1673                         nr_process_chains++;
1674         }
1675 }
1676
1677 #else
1678
1679 static inline int
1680 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1681                 struct held_lock *next)
1682 {
1683         return 1;
1684 }
1685
1686 static inline void inc_chains(void)
1687 {
1688         nr_process_chains++;
1689 }
1690
1691 #endif
1692
1693 static void
1694 print_deadlock_scenario(struct held_lock *nxt,
1695                              struct held_lock *prv)
1696 {
1697         struct lock_class *next = hlock_class(nxt);
1698         struct lock_class *prev = hlock_class(prv);
1699
1700         printk(" Possible unsafe locking scenario:\n\n");
1701         printk("       CPU0\n");
1702         printk("       ----\n");
1703         printk("  lock(");
1704         __print_lock_name(prev);
1705         printk(");\n");
1706         printk("  lock(");
1707         __print_lock_name(next);
1708         printk(");\n");
1709         printk("\n *** DEADLOCK ***\n\n");
1710         printk(" May be due to missing lock nesting notation\n\n");
1711 }
1712
1713 static int
1714 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1715                    struct held_lock *next)
1716 {
1717         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1718                 return 0;
1719
1720         printk("\n");
1721         printk("=============================================\n");
1722         printk("[ INFO: possible recursive locking detected ]\n");
1723         print_kernel_ident();
1724         printk("---------------------------------------------\n");
1725         printk("%s/%d is trying to acquire lock:\n",
1726                 curr->comm, task_pid_nr(curr));
1727         print_lock(next);
1728         printk("\nbut task is already holding lock:\n");
1729         print_lock(prev);
1730
1731         printk("\nother info that might help us debug this:\n");
1732         print_deadlock_scenario(next, prev);
1733         lockdep_print_held_locks(curr);
1734
1735         printk("\nstack backtrace:\n");
1736         dump_stack();
1737
1738         return 0;
1739 }
1740
1741 /*
1742  * Check whether we are already holding a lock of the same class.
1743  *
1744  * (Note that this has to be done separately, because the graph cannot
1745  * detect such classes of deadlocks.)
1746  *
1747  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1748  */
1749 static int
1750 check_deadlock(struct task_struct *curr, struct held_lock *next,
1751                struct lockdep_map *next_instance, int read)
1752 {
1753         struct held_lock *prev;
1754         struct held_lock *nest = NULL;
1755         int i;
1756
1757         for (i = 0; i < curr->lockdep_depth; i++) {
1758                 prev = curr->held_locks + i;
1759
1760                 if (prev->instance == next->nest_lock)
1761                         nest = prev;
1762
1763                 if (hlock_class(prev) != hlock_class(next))
1764                         continue;
1765
1766                 /*
1767                  * Allow read-after-read recursion of the same
1768                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
1769                  */
1770                 if ((read == 2) && prev->read)
1771                         return 2;
1772
1773                 /*
1774                  * We're holding the nest_lock, which serializes this lock's
1775                  * nesting behaviour.
1776                  */
1777                 if (nest)
1778                         return 2;
1779
1780                 return print_deadlock_bug(curr, prev, next);
1781         }
1782         return 1;
1783 }
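
/*
 * Example (sketch): with rwlocks, read_lock(A); read_lock(A) is accepted
 * above as recursive-read (return 2), while write_lock(A); write_lock(A)
 * or spin_lock(A); spin_lock(A) trips print_deadlock_bug() - unless the
 * second acquisition passes a nest_lock that is currently held, which
 * tells lockdep the caller serializes the nesting (e.g. via
 * mutex_lock_nest_lock()).
 */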
1784
1785 /*
1786  * There was a chain-cache miss, and we are about to add a new dependency
1787  * to a previous lock. We recursively validate the following rules:
1788  *
1789  *  - would the adding of the <prev> -> <next> dependency create a
1790  *    circular dependency in the graph? [== circular deadlock]
1791  *
1792  *  - does the new prev->next dependency connect any hardirq-safe lock
1793  *    (in the full backwards-subgraph starting at <prev>) with any
1794  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1795  *    <next>)? [== illegal lock inversion with hardirq contexts]
1796  *
1797  *  - does the new prev->next dependency connect any softirq-safe lock
1798  *    (in the full backwards-subgraph starting at <prev>) with any
1799  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1800  *    <next>)? [== illegal lock inversion with softirq contexts]
1801  *
1802  * any of these scenarios could lead to a deadlock.
1803  *
1804  * Then if all the validations pass, we add the forwards and backwards
1805  * dependency.
1806  */
1807 static int
1808 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1809                struct held_lock *next, int distance, int trylock_loop)
1810 {
1811         struct lock_list *entry;
1812         int ret;
1813         struct lock_list this;
1814         struct lock_list *uninitialized_var(target_entry);
1815         /*
1816          * Static variable, serialized by the graph_lock().
1817          *
1818          * We use this static variable to save the stack trace in case
1819          * we call into this function multiple times due to encountering
1820          * trylocks in the held lock stack.
1821          */
1822         static struct stack_trace trace;
1823
1824         /*
1825          * Prove that the new <prev> -> <next> dependency would not
1826          * create a circular dependency in the graph. (We do this by
1827          * forward-recursing into the graph starting at <next>, and
1828          * checking whether we can reach <prev>.)
1829          *
1830          * We are using global variables to control the recursion, to
1831          * keep the stackframe size of the recursive functions low:
1832          */
1833         this.class = hlock_class(next);
1834         this.parent = NULL;
1835         ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1836         if (unlikely(!ret))
1837                 return print_circular_bug(&this, target_entry, next, prev);
1838         else if (unlikely(ret < 0))
1839                 return print_bfs_bug(ret);
1840
1841         if (!check_prev_add_irq(curr, prev, next))
1842                 return 0;
1843
1844         /*
1845          * For recursive read-locks we do all the dependency checks,
1846          * but we dont store read-triggered dependencies (only
1847          * write-triggered dependencies). This ensures that only the
1848          * write-side dependencies matter, and that if for example a
1849          * write-lock never takes any other locks, then the reads are
1850          * equivalent to a NOP.
1851          */
1852         if (next->read == 2 || prev->read == 2)
1853                 return 1;
1854         /*
1855          * Is the <prev> -> <next> dependency already present?
1856          *
1857          * (this may occur even though this is a new chain: consider
1858          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1859          *  chains - the second one will be new, but L1 already has
1860          *  L2 added to its dependency list, due to the first chain.)
1861          */
1862         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1863                 if (entry->class == hlock_class(next)) {
1864                         if (distance == 1)
1865                                 entry->distance = 1;
1866                         return 2;
1867                 }
1868         }
1869
1870         if (!trylock_loop && !save_trace(&trace))
1871                 return 0;
1872
1873         /*
1874          * Ok, all validations passed, add the new lock
1875          * to the previous lock's dependency list:
1876          */
1877         ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1878                                &hlock_class(prev)->locks_after,
1879                                next->acquire_ip, distance, &trace);
1880
1881         if (!ret)
1882                 return 0;
1883
1884         ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1885                                &hlock_class(next)->locks_before,
1886                                next->acquire_ip, distance, &trace);
1887         if (!ret)
1888                 return 0;
1889
1890         /*
1891          * Debugging printouts:
1892          */
1893         if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1894                 graph_unlock();
1895                 printk("\n new dependency: ");
1896                 print_lock_name(hlock_class(prev));
1897                 printk(" => ");
1898                 print_lock_name(hlock_class(next));
1899                 printk("\n");
1900                 dump_stack();
1901                 return graph_lock();
1902         }
1903         return 1;
1904 }
1905
1906 /*
1907  * Add the dependency to all directly-previous locks that are 'relevant'.
1908  * The ones that are relevant are (in increasing distance from curr):
1909  * all consecutive trylock entries and the final non-trylock entry - or
1910  * the end of this context's lock-chain - whichever comes first.
1911  */
1912 static int
1913 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1914 {
1915         int depth = curr->lockdep_depth;
1916         int trylock_loop = 0;
1917         struct held_lock *hlock;
1918
1919         /*
1920          * Debugging checks.
1921          *
1922          * Depth must not be zero for a non-head lock:
1923          */
1924         if (!depth)
1925                 goto out_bug;
1926         /*
1927          * At least two relevant locks must exist for this
1928          * to be a head:
1929          */
1930         if (curr->held_locks[depth].irq_context !=
1931                         curr->held_locks[depth-1].irq_context)
1932                 goto out_bug;
1933
1934         for (;;) {
1935                 int distance = curr->lockdep_depth - depth + 1;
1936                 hlock = curr->held_locks + depth-1;
1937                 /*
1938                  * Only non-recursive-read entries get new dependencies
1939                  * added:
1940                  */
1941                 if (hlock->read != 2) {
1942                         if (!check_prev_add(curr, hlock, next,
1943                                                 distance, trylock_loop))
1944                                 return 0;
1945                         /*
1946                          * Stop after the first non-trylock entry,
1947                          * as non-trylock entries have added their
1948                          * own direct dependencies already, so this
1949                          * lock is connected to them indirectly:
1950                          */
1951                         if (!hlock->trylock)
1952                                 break;
1953                 }
1954                 depth--;
1955                 /*
1956                  * End of lock-stack?
1957                  */
1958                 if (!depth)
1959                         break;
1960                 /*
1961                  * Stop the search if we cross into another context:
1962                  */
1963                 if (curr->held_locks[depth].irq_context !=
1964                                 curr->held_locks[depth-1].irq_context)
1965                         break;
1966                 trylock_loop = 1;
1967         }
1968         return 1;
1969 out_bug:
1970         if (!debug_locks_off_graph_unlock())
1971                 return 0;
1972
1973         /*
1974          * Clearly we all shouldn't be here, but since we made it we
1975          * can reliably say we messed up our state. See the above two
1976          * gotos for reasons why we could possibly end up here.
1977          */
1978         WARN_ON(1);
1979
1980         return 0;
1981 }
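
/*
 * Example (sketch, all write-locks in one context): with a held stack of
 * A, B(trylock), C(trylock) and a new lock D being acquired, the loop
 * above adds C -> D (distance 1), B -> D (distance 2) and A -> D
 * (distance 3), then stops at A as it is the first non-trylock entry.
 */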
1982
1983 unsigned long nr_lock_chains;
1984 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1985 int nr_chain_hlocks;
1986 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1987
1988 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1989 {
1990         return lock_classes + chain_hlocks[chain->base + i];
1991 }
1992
1993 /*
1994  * Look up a dependency chain. If the key is not present yet then
1995  * add it and return 1 - in this case the new dependency chain is
1996  * validated. If the key is already hashed, return 0.
1997  * (On return with 1 graph_lock is held.)
1998  */
1999 static inline int lookup_chain_cache(struct task_struct *curr,
2000                                      struct held_lock *hlock,
2001                                      u64 chain_key)
2002 {
2003         struct lock_class *class = hlock_class(hlock);
2004         struct list_head *hash_head = chainhashentry(chain_key);
2005         struct lock_chain *chain;
2006         struct held_lock *hlock_curr;
2007         int i, j;
2008
2009         /*
2010          * We might need to take the graph lock, ensure we've got IRQs
2011          * disabled to make this an IRQ-safe lock.. for recursion reasons
2012          * lockdep won't complain about its own locking errors.
2013          */
2014         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2015                 return 0;
2016         /*
2017          * We can walk it lock-free, because entries only get added
2018          * to the hash:
2019          */
2020         list_for_each_entry(chain, hash_head, entry) {
2021                 if (chain->chain_key == chain_key) {
2022 cache_hit:
2023                         debug_atomic_inc(chain_lookup_hits);
2024                         if (very_verbose(class))
2025                                 printk("\nhash chain already cached, key: "
2026                                         "%016Lx tail class: [%p] %s\n",
2027                                         (unsigned long long)chain_key,
2028                                         class->key, class->name);
2029                         return 0;
2030                 }
2031         }
2032         if (very_verbose(class))
2033                 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
2034                         (unsigned long long)chain_key, class->key, class->name);
2035         /*
2036          * Allocate a new chain entry from the static array, and add
2037          * it to the hash:
2038          */
2039         if (!graph_lock())
2040                 return 0;
2041         /*
2042          * We have to walk the chain again locked - to avoid duplicates:
2043          */
2044         list_for_each_entry(chain, hash_head, entry) {
2045                 if (chain->chain_key == chain_key) {
2046                         graph_unlock();
2047                         goto cache_hit;
2048                 }
2049         }
2050         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2051                 if (!debug_locks_off_graph_unlock())
2052                         return 0;
2053
2054                 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
2055                 printk("turning off the locking correctness validator.\n");
2056                 printk("Attach output of /proc/lock_stat to bug report\n");
2057                 dump_stack();
2058                 return 0;
2059         }
2060         chain = lock_chains + nr_lock_chains++;
2061         chain->chain_key = chain_key;
2062         chain->irq_context = hlock->irq_context;
2063         /* Find the first held_lock of current chain */
2064         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2065                 hlock_curr = curr->held_locks + i;
2066                 if (hlock_curr->irq_context != hlock->irq_context)
2067                         break;
2068         }
2069         i++;
2070         chain->depth = curr->lockdep_depth + 1 - i;
2071         if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2072                 chain->base = nr_chain_hlocks;
2073                 nr_chain_hlocks += chain->depth;
2074                 for (j = 0; j < chain->depth - 1; j++, i++) {
2075                         int lock_id = curr->held_locks[i].class_idx - 1;
2076                         chain_hlocks[chain->base + j] = lock_id;
2077                 }
2078                 chain_hlocks[chain->base + j] = class - lock_classes;
2079         }
2080         list_add_tail_rcu(&chain->entry, hash_head);
2081         debug_atomic_inc(chain_lookup_misses);
2082         inc_chains();
2083
2084         return 1;
2085 }
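
/*
 * Example (sketch): the first time a task takes A, then B, then C in the
 * same context, the resulting chain key misses the hash above, a
 * lock_chain covering those classes is recorded in chain_hlocks[] and
 * the full dependency checks run; any later A -> B -> C sequence hits
 * the cache and skips validation entirely.
 */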
2086
2087 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2088                 struct held_lock *hlock, int chain_head, u64 chain_key)
2089 {
2090         /*
2091          * Trylock needs to maintain the stack of held locks, but it
2092          * does not add new dependencies, because trylock can be done
2093          * in any order.
2094          *
2095          * We look up the chain_key and do the O(N^2) check and update of
2096          * the dependencies only if this is a new dependency chain.
2097          * (If lookup_chain_cache() returns with 1 it acquires
2098          * graph_lock for us)
2099          */
2100         if (!hlock->trylock && (hlock->check == 2) &&
2101             lookup_chain_cache(curr, hlock, chain_key)) {
2102                 /*
2103                  * Check whether last held lock:
2104                  *
2105                  * - is irq-safe, if this lock is irq-unsafe
2106                  * - is softirq-safe, if this lock is hardirq-unsafe
2107                  *
2108                  * And check whether the new lock's dependency graph
2109                  * could lead back to the previous lock.
2110                  *
2111                  * any of these scenarios could lead to a deadlock. If
2112                  * any of these scenarios could lead to a deadlock. If all
2113                  * validations pass, the new dependencies get added below.
2114                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2115
2116                 if (!ret)
2117                         return 0;
2118                 /*
2119                  * Mark recursive read, as we jump over it when
2120                  * building dependencies (just like we jump over
2121                  * trylock entries):
2122                  */
2123                 if (ret == 2)
2124                         hlock->read = 2;
2125                 /*
2126                  * Add dependency only if this lock is not the head
2127                  * of the chain, and if it's not a secondary read-lock:
2128                  */
2129                 if (!chain_head && ret != 2)
2130                         if (!check_prevs_add(curr, hlock))
2131                                 return 0;
2132                 graph_unlock();
2133         } else
2134                 /* after lookup_chain_cache(): */
2135                 if (unlikely(!debug_locks))
2136                         return 0;
2137
2138         return 1;
2139 }
2140 #else
2141 static inline int validate_chain(struct task_struct *curr,
2142                 struct lockdep_map *lock, struct held_lock *hlock,
2143                 int chain_head, u64 chain_key)
2144 {
2145         return 1;
2146 }
2147 #endif
2148
2149 /*
2150  * We are building curr_chain_key incrementally, so double-check
2151  * it from scratch, to make sure that it's done correctly:
2152  */
2153 static void check_chain_key(struct task_struct *curr)
2154 {
2155 #ifdef CONFIG_DEBUG_LOCKDEP
2156         struct held_lock *hlock, *prev_hlock = NULL;
2157         unsigned int i, id;
2158         u64 chain_key = 0;
2159
2160         for (i = 0; i < curr->lockdep_depth; i++) {
2161                 hlock = curr->held_locks + i;
2162                 if (chain_key != hlock->prev_chain_key) {
2163                         debug_locks_off();
2164                         /*
2165                          * We got mighty confused, our chain keys don't match
2166                          * with what we expect, did someone trample on our task state?
2167                          */
2168                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2169                                 curr->lockdep_depth, i,
2170                                 (unsigned long long)chain_key,
2171                                 (unsigned long long)hlock->prev_chain_key);
2172                         return;
2173                 }
2174                 id = hlock->class_idx - 1;
2175                 /*
2176                  * Whoops ran out of static storage again?
2177                  */
2178                 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2179                         return;
2180
2181                 if (prev_hlock && (prev_hlock->irq_context !=
2182                                                         hlock->irq_context))
2183                         chain_key = 0;
2184                 chain_key = iterate_chain_key(chain_key, id);
2185                 prev_hlock = hlock;
2186         }
2187         if (chain_key != curr->curr_chain_key) {
2188                 debug_locks_off();
2189                 /*
2190                  * More smoking hash instead of calculating it, damn see these
2191                  * numbers float.. I bet that a pink elephant stepped on my memory.
2192                  */
2193                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2194                         curr->lockdep_depth, i,
2195                         (unsigned long long)chain_key,
2196                         (unsigned long long)curr->curr_chain_key);
2197         }
2198 #endif
2199 }
2200
2201 static void
2202 print_usage_bug_scenario(struct held_lock *lock)
2203 {
2204         struct lock_class *class = hlock_class(lock);
2205
2206         printk(" Possible unsafe locking scenario:\n\n");
2207         printk("       CPU0\n");
2208         printk("       ----\n");
2209         printk("  lock(");
2210         __print_lock_name(class);
2211         printk(");\n");
2212         printk("  <Interrupt>\n");
2213         printk("    lock(");
2214         __print_lock_name(class);
2215         printk(");\n");
2216         printk("\n *** DEADLOCK ***\n\n");
2217 }
2218
2219 static int
2220 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2221                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2222 {
2223         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2224                 return 0;
2225
2226         printk("\n");
2227         printk("=================================\n");
2228         printk("[ INFO: inconsistent lock state ]\n");
2229         print_kernel_ident();
2230         printk("---------------------------------\n");
2231
2232         printk("inconsistent {%s} -> {%s} usage.\n",
2233                 usage_str[prev_bit], usage_str[new_bit]);
2234
2235         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2236                 curr->comm, task_pid_nr(curr),
2237                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2238                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2239                 trace_hardirqs_enabled(curr),
2240                 trace_softirqs_enabled(curr));
2241         print_lock(this);
2242
2243         printk("{%s} state was registered at:\n", usage_str[prev_bit]);
2244         print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2245
2246         print_irqtrace_events(curr);
2247         printk("\nother info that might help us debug this:\n");
2248         print_usage_bug_scenario(this);
2249
2250         lockdep_print_held_locks(curr);
2251
2252         printk("\nstack backtrace:\n");
2253         dump_stack();
2254
2255         return 0;
2256 }
2257
2258 /*
2259  * Print out an error if an invalid bit is set:
2260  */
2261 static inline int
2262 valid_state(struct task_struct *curr, struct held_lock *this,
2263             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2264 {
2265         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2266                 return print_usage_bug(curr, this, bad_bit, new_bit);
2267         return 1;
2268 }
2269
2270 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2271                      enum lock_usage_bit new_bit);
2272
2273 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2274
2275 /*
2276  * print irq inversion bug:
2277  */
2278 static int
2279 print_irq_inversion_bug(struct task_struct *curr,
2280                         struct lock_list *root, struct lock_list *other,
2281                         struct held_lock *this, int forwards,
2282                         const char *irqclass)
2283 {
2284         struct lock_list *entry = other;
2285         struct lock_list *middle = NULL;
2286         int depth;
2287
2288         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2289                 return 0;
2290
2291         printk("\n");
2292         printk("=========================================================\n");
2293         printk("[ INFO: possible irq lock inversion dependency detected ]\n");
2294         print_kernel_ident();
2295         printk("---------------------------------------------------------\n");
2296         printk("%s/%d just changed the state of lock:\n",
2297                 curr->comm, task_pid_nr(curr));
2298         print_lock(this);
2299         if (forwards)
2300                 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2301         else
2302                 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2303         print_lock_name(other->class);
2304         printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2305
2306         printk("\nother info that might help us debug this:\n");
2307
2308         /* Find a middle lock (if one exists) */
2309         depth = get_lock_depth(other);
2310         do {
2311                 if (depth == 0 && (entry != root)) {
2312                         printk("lockdep:%s bad path found in chain graph\n", __func__);
2313                         break;
2314                 }
2315                 middle = entry;
2316                 entry = get_lock_parent(entry);
2317                 depth--;
2318         } while (entry && entry != root && (depth >= 0));
2319         if (forwards)
2320                 print_irq_lock_scenario(root, other,
2321                         middle ? middle->class : root->class, other->class);
2322         else
2323                 print_irq_lock_scenario(other, root,
2324                         middle ? middle->class : other->class, root->class);
2325
2326         lockdep_print_held_locks(curr);
2327
2328         printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2329         if (!save_trace(&root->trace))
2330                 return 0;
2331         print_shortest_lock_dependencies(other, root);
2332
2333         printk("\nstack backtrace:\n");
2334         dump_stack();
2335
2336         return 0;
2337 }
2338
2339 /*
2340  * Prove that in the forwards-direction subgraph starting at <this>
2341  * there is no lock matching <mask>:
2342  */
2343 static int
2344 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2345                      enum lock_usage_bit bit, const char *irqclass)
2346 {
2347         int ret;
2348         struct lock_list root;
2349         struct lock_list *uninitialized_var(target_entry);
2350
2351         root.parent = NULL;
2352         root.class = hlock_class(this);
2353         ret = find_usage_forwards(&root, bit, &target_entry);
2354         if (ret < 0)
2355                 return print_bfs_bug(ret);
2356         if (ret == 1)
2357                 return ret;
2358
2359         return print_irq_inversion_bug(curr, &root, target_entry,
2360                                         this, 1, irqclass);
2361 }
2362
2363 /*
2364  * Prove that in the backwards-direction subgraph starting at <this>
2365  * there is no lock matching <mask>:
2366  */
2367 static int
2368 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2369                       enum lock_usage_bit bit, const char *irqclass)
2370 {
2371         int ret;
2372         struct lock_list root;
2373         struct lock_list *uninitialized_var(target_entry);
2374
2375         root.parent = NULL;
2376         root.class = hlock_class(this);
2377         ret = find_usage_backwards(&root, bit, &target_entry);
2378         if (ret < 0)
2379                 return print_bfs_bug(ret);
2380         if (ret == 1)
2381                 return ret;
2382
2383         return print_irq_inversion_bug(curr, &root, target_entry,
2384                                         this, 0, irqclass);
2385 }
2386
2387 void print_irqtrace_events(struct task_struct *curr)
2388 {
2389         printk("irq event stamp: %u\n", curr->irq_events);
2390         printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
2391         print_ip_sym(curr->hardirq_enable_ip);
2392         printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
2393         print_ip_sym(curr->hardirq_disable_ip);
2394         printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
2395         print_ip_sym(curr->softirq_enable_ip);
2396         printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
2397         print_ip_sym(curr->softirq_disable_ip);
2398 }
2399
2400 static int HARDIRQ_verbose(struct lock_class *class)
2401 {
2402 #if HARDIRQ_VERBOSE
2403         return class_filter(class);
2404 #endif
2405         return 0;
2406 }
2407
2408 static int SOFTIRQ_verbose(struct lock_class *class)
2409 {
2410 #if SOFTIRQ_VERBOSE
2411         return class_filter(class);
2412 #endif
2413         return 0;
2414 }
2415
2416 static int RECLAIM_FS_verbose(struct lock_class *class)
2417 {
2418 #if RECLAIM_VERBOSE
2419         return class_filter(class);
2420 #endif
2421         return 0;
2422 }
2423
2424 #define STRICT_READ_CHECKS      1
2425
2426 static int (*state_verbose_f[])(struct lock_class *class) = {
2427 #define LOCKDEP_STATE(__STATE) \
2428         __STATE##_verbose,
2429 #include "lockdep_states.h"
2430 #undef LOCKDEP_STATE
2431 };
2432
2433 static inline int state_verbose(enum lock_usage_bit bit,
2434                                 struct lock_class *class)
2435 {
2436         return state_verbose_f[bit >> 2](class);
2437 }
2438
2439 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2440                              enum lock_usage_bit bit, const char *name);
2441
2442 static int
2443 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2444                 enum lock_usage_bit new_bit)
2445 {
2446         int excl_bit = exclusive_bit(new_bit);
2447         int read = new_bit & 1;
2448         int dir = new_bit & 2;
2449
2450         /*
2451          * mark USED_IN has to look forwards -- to ensure no dependency
2452          * has ENABLED state, which would allow recursion deadlocks.
2453          *
2454          * mark ENABLED has to look backwards -- to ensure no dependee
2455          * has USED_IN state, which, again, would allow recursion deadlocks.
2456          */
2457         check_usage_f usage = dir ?
2458                 check_usage_backwards : check_usage_forwards;
2459
2460         /*
2461          * Validate that this particular lock does not have conflicting
2462          * usage states.
2463          */
2464         if (!valid_state(curr, this, new_bit, excl_bit))
2465                 return 0;
2466
2467         /*
2468          * Validate that the lock dependencies don't have conflicting usage
2469          * states.
2470          */
2471         if ((!read || !dir || STRICT_READ_CHECKS) &&
2472                         !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2473                 return 0;
2474
2475         /*
2476          * Check for read in write conflicts
2477          */
2478         if (!read) {
2479                 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2480                         return 0;
2481
2482                 if (STRICT_READ_CHECKS &&
2483                         !usage(curr, this, excl_bit + 1,
2484                                 state_name(new_bit + 1)))
2485                         return 0;
2486         }
2487
2488         if (state_verbose(new_bit, hlock_class(this)))
2489                 return 2;
2490
2491         return 1;
2492 }
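
/*
 * Example (sketch): marking a lock LOCK_USED_IN_HARDIRQ checks that its
 * class is not already LOCK_ENABLED_HARDIRQ (valid_state) and that no
 * hardirq-unsafe lock is reachable forwards from it
 * (check_usage_forwards); marking LOCK_ENABLED_HARDIRQ is the mirror
 * image: no LOCK_USED_IN_HARDIRQ on the class itself, and no
 * hardirq-safe lock reachable backwards.
 */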
2493
2494 enum mark_type {
2495 #define LOCKDEP_STATE(__STATE)  __STATE,
2496 #include "lockdep_states.h"
2497 #undef LOCKDEP_STATE
2498 };
2499
2500 /*
2501  * Mark all held locks with a usage bit:
2502  */
2503 static int
2504 mark_held_locks(struct task_struct *curr, enum mark_type mark)
2505 {
2506         enum lock_usage_bit usage_bit;
2507         struct held_lock *hlock;
2508         int i;
2509
2510         for (i = 0; i < curr->lockdep_depth; i++) {
2511                 hlock = curr->held_locks + i;
2512
2513                 usage_bit = 2 + (mark << 2); /* ENABLED */
2514                 if (hlock->read)
2515                         usage_bit += 1; /* READ */
2516
2517                 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2518
2519                 if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
2520                         continue;
2521
2522                 if (!mark_lock(curr, hlock, usage_bit))
2523                         return 0;
2524         }
2525
2526         return 1;
2527 }
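
/*
 * Example (sketch, assuming the usual LOCKDEP_STATE() enum layout):
 * mark_held_locks(curr, HARDIRQ) computes usage_bit 2 + (0 << 2), i.e.
 * LOCK_ENABLED_HARDIRQ, for every write-held lock and the _READ variant
 * for every read-held one; with mark == SOFTIRQ the same arithmetic
 * yields LOCK_ENABLED_SOFTIRQ(_READ).
 */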
2528
2529 /*
2530  * Hardirqs will be enabled:
2531  */
2532 static void __trace_hardirqs_on_caller(unsigned long ip)
2533 {
2534         struct task_struct *curr = current;
2535
2536         /* we'll do an OFF -> ON transition: */
2537         curr->hardirqs_enabled = 1;
2538
2539         /*
2540          * We are going to turn hardirqs on, so set the
2541          * usage bit for all held locks:
2542          */
2543         if (!mark_held_locks(curr, HARDIRQ))
2544                 return;
2545         /*
2546          * If we have softirqs enabled, then set the usage
2547          * bit for all held locks. (disabled hardirqs prevented
2548          * this bit from being set before)
2549          */
2550         if (curr->softirqs_enabled)
2551                 if (!mark_held_locks(curr, SOFTIRQ))
2552                         return;
2553
2554         curr->hardirq_enable_ip = ip;
2555         curr->hardirq_enable_event = ++curr->irq_events;
2556         debug_atomic_inc(hardirqs_on_events);
2557 }
2558
2559 void trace_hardirqs_on_caller(unsigned long ip)
2560 {
2561         time_hardirqs_on(CALLER_ADDR0, ip);
2562
2563         if (unlikely(!debug_locks || current->lockdep_recursion))
2564                 return;
2565
2566         if (unlikely(current->hardirqs_enabled)) {
2567                 /*
2568                  * Neither irq nor preemption are disabled here
2569                  * so this is racy by nature but losing one hit
2570                  * in a stat is not a big deal.
2571                  */
2572                 __debug_atomic_inc(redundant_hardirqs_on);
2573                 return;
2574         }
2575
2576         /*
2577          * We're enabling irqs and according to our state above irqs weren't
2578          * already enabled, yet we find the hardware thinks they are in fact
2579          * enabled.. someone messed up their IRQ state tracing.
2580          */
2581         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2582                 return;
2583
2584         /*
2585          * See the fine text that goes along with this variable definition.
2586          */
2587         if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2588                 return;
2589
2590         /*
2591          * Can't allow enabling interrupts while in an interrupt handler,
2592          * that's general bad form and such. Recursion, limited stack etc..
2593          */
2594         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2595                 return;
2596
2597         current->lockdep_recursion = 1;
2598         __trace_hardirqs_on_caller(ip);
2599         current->lockdep_recursion = 0;
2600 }
2601 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2602
2603 void trace_hardirqs_on(void)
2604 {
2605         trace_hardirqs_on_caller(CALLER_ADDR0);
2606 }
2607 EXPORT_SYMBOL(trace_hardirqs_on);
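
/*
 * These hooks are not called by lock users directly; the irq-flags
 * wrappers in <linux/irqflags.h> invoke them, roughly (sketch of the
 * CONFIG_TRACE_IRQFLAGS definition):
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *
 * which is how lockdep observes every hardirq enable/disable transition.
 */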
2608
2609 /*
2610  * Hardirqs were disabled:
2611  */
2612 void trace_hardirqs_off_caller(unsigned long ip)
2613 {
2614         struct task_struct *curr = current;
2615
2616         time_hardirqs_off(CALLER_ADDR0, ip);
2617
2618         if (unlikely(!debug_locks || current->lockdep_recursion))
2619                 return;
2620
2621         /*
2622          * So we're supposed to get called after you mask local IRQs, but for
2623          * some reason the hardware doesn't quite think you did a proper job.
2624          */
2625         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2626                 return;
2627
2628         if (curr->hardirqs_enabled) {
2629                 /*
2630                  * We have done an ON -> OFF transition:
2631                  */
2632                 curr->hardirqs_enabled = 0;
2633                 curr->hardirq_disable_ip = ip;
2634                 curr->hardirq_disable_event = ++curr->irq_events;
2635                 debug_atomic_inc(hardirqs_off_events);
2636         } else
2637                 debug_atomic_inc(redundant_hardirqs_off);
2638 }
2639 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2640
2641 void trace_hardirqs_off(void)
2642 {
2643         trace_hardirqs_off_caller(CALLER_ADDR0);
2644 }
2645 EXPORT_SYMBOL(trace_hardirqs_off);
2646
2647 /*
2648  * Softirqs will be enabled:
2649  */
2650 void trace_softirqs_on(unsigned long ip)
2651 {
2652         struct task_struct *curr = current;
2653
2654         if (unlikely(!debug_locks || current->lockdep_recursion))
2655                 return;
2656
2657         /*
2658          * We fancy IRQs being disabled here, see softirq.c, avoids
2659          * funny state and nesting things.
2660          */
2661         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2662                 return;
2663
2664         if (curr->softirqs_enabled) {
2665                 debug_atomic_inc(redundant_softirqs_on);
2666                 return;
2667         }
2668
2669         current->lockdep_recursion = 1;
2670         /*
2671          * We'll do an OFF -> ON transition:
2672          */
2673         curr->softirqs_enabled = 1;
2674         curr->softirq_enable_ip = ip;
2675         curr->softirq_enable_event = ++curr->irq_events;
2676         debug_atomic_inc(softirqs_on_events);
2677         /*
2678          * We are going to turn softirqs on, so set the
2679          * usage bit for all held locks, if hardirqs are
2680          * enabled too:
2681          */
2682         if (curr->hardirqs_enabled)
2683                 mark_held_locks(curr, SOFTIRQ);
2684         current->lockdep_recursion = 0;
2685 }
2686
2687 /*
2688  * Softirqs were disabled:
2689  */
2690 void trace_softirqs_off(unsigned long ip)
2691 {
2692         struct task_struct *curr = current;
2693
2694         if (unlikely(!debug_locks || current->lockdep_recursion))
2695                 return;
2696
2697         /*
2698          * We fancy IRQs being disabled here, see softirq.c
2699          */
2700         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2701                 return;
2702
2703         if (curr->softirqs_enabled) {
2704                 /*
2705                  * We have done an ON -> OFF transition:
2706                  */
2707                 curr->softirqs_enabled = 0;
2708                 curr->softirq_disable_ip = ip;
2709                 curr->softirq_disable_event = ++curr->irq_events;
2710                 debug_atomic_inc(softirqs_off_events);
2711                 /*
2712                  * Whoops, we wanted softirqs off, so why aren't they?
2713                  */
2714                 DEBUG_LOCKS_WARN_ON(!softirq_count());
2715         } else
2716                 debug_atomic_inc(redundant_softirqs_off);
2717 }
2718
2719 static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2720 {
2721         struct task_struct *curr = current;
2722
2723         if (unlikely(!debug_locks))
2724                 return;
2725
2726         /* no reclaim without waiting on it */
2727         if (!(gfp_mask & __GFP_WAIT))
2728                 return;
2729
2730         /* this guy won't enter reclaim */
2731         if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2732                 return;
2733
2734         /* We're only interested in __GFP_FS allocations for now */
2735         if (!(gfp_mask & __GFP_FS))
2736                 return;
2737
2738         /*
2739          * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
2740          */
2741         if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2742                 return;
2743
2744         mark_held_locks(curr, RECLAIM_FS);
2745 }
2746
2747 static void check_flags(unsigned long flags);
2748
2749 void lockdep_trace_alloc(gfp_t gfp_mask)
2750 {
2751         unsigned long flags;
2752
2753         if (unlikely(current->lockdep_recursion))
2754                 return;
2755
2756         raw_local_irq_save(flags);
2757         check_flags(flags);
2758         current->lockdep_recursion = 1;
2759         __lockdep_trace_alloc(gfp_mask, flags);
2760         current->lockdep_recursion = 0;
2761         raw_local_irq_restore(flags);
2762 }
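
/*
 * Example (sketch): the page and slab allocators call
 * lockdep_trace_alloc() on allocation, so kmalloc(..., GFP_KERNEL) while
 * holding a lock marks that lock RECLAIM_FS-enabled; if the same lock
 * class is ever taken while the task is in __GFP_FS reclaim, lockdep can
 * report the potential reclaim deadlock even though it never actually
 * triggered.
 */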
2763
2764 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2765 {
2766         /*
2767          * If non-trylock use in a hardirq or softirq context, then
2768          * mark the lock as used in these contexts:
2769          */
2770         if (!hlock->trylock) {
2771                 if (hlock->read) {
2772                         if (curr->hardirq_context)
2773                                 if (!mark_lock(curr, hlock,
2774                                                 LOCK_USED_IN_HARDIRQ_READ))
2775                                         return 0;
2776                         if (curr->softirq_context)
2777                                 if (!mark_lock(curr, hlock,
2778                                                 LOCK_USED_IN_SOFTIRQ_READ))
2779                                         return 0;
2780                 } else {
2781                         if (curr->hardirq_context)
2782                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2783                                         return 0;
2784                         if (curr->softirq_context)
2785                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2786                                         return 0;
2787                 }
2788         }
2789         if (!hlock->hardirqs_off) {
2790                 if (hlock->read) {
2791                         if (!mark_lock(curr, hlock,
2792                                         LOCK_ENABLED_HARDIRQ_READ))
2793                                 return 0;
2794                         if (curr->softirqs_enabled)
2795                                 if (!mark_lock(curr, hlock,
2796                                                 LOCK_ENABLED_SOFTIRQ_READ))
2797                                         return 0;
2798                 } else {
2799                         if (!mark_lock(curr, hlock,
2800                                         LOCK_ENABLED_HARDIRQ))
2801                                 return 0;
2802                         if (curr->softirqs_enabled)
2803                                 if (!mark_lock(curr, hlock,
2804                                                 LOCK_ENABLED_SOFTIRQ))
2805                                         return 0;
2806                 }
2807         }
2808
2809         /*
2810          * We reuse the irq context infrastructure more broadly as a general
2811          * context checking code. This tests GFP_FS recursion (a lock taken
2812          * during reclaim for a GFP_FS allocation is held over a GFP_FS
2813          * allocation).
2814          */
2815         if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2816                 if (hlock->read) {
2817                         if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2818                                         return 0;
2819                 } else {
2820                         if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2821                                         return 0;
2822                 }
2823         }
2824
2825         return 1;
2826 }
2827
2828 static int separate_irq_context(struct task_struct *curr,
2829                 struct held_lock *hlock)
2830 {
2831         unsigned int depth = curr->lockdep_depth;
2832
2833         /*
2834          * Keep track of points where we cross into an interrupt context:
2835          */
2836         hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2837                                 curr->softirq_context;
2838         if (depth) {
2839                 struct held_lock *prev_hlock;
2840
2841                 prev_hlock = curr->held_locks + depth-1;
2842                 /*
2843                  * If we cross into another context, reset the
2844                  * hash key (this also prevents the checking and the
2845                  * adding of the dependency to 'prev'):
2846                  */
2847                 if (prev_hlock->irq_context != hlock->irq_context)
2848                         return 1;
2849         }
2850         return 0;
2851 }
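
/*
 * The irq_context value computed above is 0 in plain process context,
 * 1 in softirq context and 2 in hardirq context (3 if the hardirq
 * interrupted a softirq), so locks taken on different sides of such a
 * boundary are never chained together.
 */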
2852
2853 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2854
2855 static inline
2856 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2857                 enum lock_usage_bit new_bit)
2858 {
2859         WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAGS */
2860         return 1;
2861 }
2862
2863 static inline int mark_irqflags(struct task_struct *curr,
2864                 struct held_lock *hlock)
2865 {
2866         return 1;
2867 }
2868
2869 static inline int separate_irq_context(struct task_struct *curr,
2870                 struct held_lock *hlock)
2871 {
2872         return 0;
2873 }
2874
2875 void lockdep_trace_alloc(gfp_t gfp_mask)
2876 {
2877 }
2878
2879 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2880
2881 /*
2882  * Mark a lock with a usage bit, and validate the state transition:
2883  */
2884 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2885                              enum lock_usage_bit new_bit)
2886 {
2887         unsigned int new_mask = 1 << new_bit, ret = 1;
2888
2889         /*
2890          * If already set then do not dirty the cacheline,
2891          * nor do any checks:
2892          */
2893         if (likely(hlock_class(this)->usage_mask & new_mask))
2894                 return 1;
2895
2896         if (!graph_lock())
2897                 return 0;
2898         /*
2899          * Make sure we didn't race:
2900          */
2901         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2902                 graph_unlock();
2903                 return 1;
2904         }
2905
2906         hlock_class(this)->usage_mask |= new_mask;
2907
2908         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2909                 return 0;
2910
2911         switch (new_bit) {
2912 #define LOCKDEP_STATE(__STATE)                  \
2913         case LOCK_USED_IN_##__STATE:            \
2914         case LOCK_USED_IN_##__STATE##_READ:     \
2915         case LOCK_ENABLED_##__STATE:            \
2916         case LOCK_ENABLED_##__STATE##_READ:
2917 #include "lockdep_states.h"
2918 #undef LOCKDEP_STATE
2919                 ret = mark_lock_irq(curr, this, new_bit);
2920                 if (!ret)
2921                         return 0;
2922                 break;
2923         case LOCK_USED:
2924                 debug_atomic_dec(nr_unused_locks);
2925                 break;
2926         default:
2927                 if (!debug_locks_off_graph_unlock())
2928                         return 0;
2929                 WARN_ON(1);
2930                 return 0;
2931         }
2932
2933         graph_unlock();
2934
2935         /*
2936          * We must printk outside of the graph_lock:
2937          */
2938         if (ret == 2) {
2939                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2940                 print_lock(this);
2941                 print_irqtrace_events(curr);
2942                 dump_stack();
2943         }
2944
2945         return ret;
2946 }
2947
2948 /*
2949  * Initialize a lock instance's lock-class mapping info:
2950  */
2951 void lockdep_init_map(struct lockdep_map *lock, const char *name,
2952                       struct lock_class_key *key, int subclass)
2953 {
2954         int i;
2955
2956         kmemcheck_mark_initialized(lock, sizeof(*lock));
2957
2958         for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
2959                 lock->class_cache[i] = NULL;
2960
2961 #ifdef CONFIG_LOCK_STAT
2962         lock->cpu = raw_smp_processor_id();
2963 #endif
2964
2965         /*
2966          * Can't be having no nameless bastards around this place!
2967          */
2968         if (DEBUG_LOCKS_WARN_ON(!name)) {
2969                 lock->name = "NULL";
2970                 return;
2971         }
2972
2973         lock->name = name;
2974
2975         /*
2976          * No key, no joy, we need to hash something.
2977          */
2978         if (DEBUG_LOCKS_WARN_ON(!key))
2979                 return;
2980         /*
2981          * Sanity check, the lock-class key must be persistent:
2982          */
2983         if (!static_obj(key)) {
2984                 printk("BUG: key %p not in .data!\n", key);
2985                 /*
2986                  * What it says above ^^^^^, I suggest you read it.
2987                  */
2988                 DEBUG_LOCKS_WARN_ON(1);
2989                 return;
2990         }
2991         lock->key = key;
2992
2993         if (unlikely(!debug_locks))
2994                 return;
2995
2996         if (subclass)
2997                 register_lock_class(lock, subclass, 1);
2998 }
2999 EXPORT_SYMBOL_GPL(lockdep_init_map);
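
/*
 * Typical use outside this file (sketch): lock-init helpers give each
 * init call site its own static key, roughly
 *
 *	#define my_lock_init(l)						\
 *	do {								\
 *		static struct lock_class_key __key;			\
 *		lockdep_init_map(&(l)->dep_map, #l, &__key, 0);		\
 *	} while (0)
 *
 * so locks initialized at different places end up in different classes.
 * (my_lock_init() is a made-up name; see spin_lock_init() and friends
 * for the real wrappers.)
 */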
3000
3001 struct lock_class_key __lockdep_no_validate__;
3002
3003 static int
3004 print_lock_nested_lock_not_held(struct task_struct *curr,
3005                                 struct held_lock *hlock,
3006                                 unsigned long ip)
3007 {
3008         if (!debug_locks_off())
3009                 return 0;
3010         if (debug_locks_silent)
3011                 return 0;
3012
3013         printk("\n");
3014         printk("==================================\n");
3015         printk("[ BUG: Nested lock was not taken ]\n");
3016         print_kernel_ident();
3017         printk("----------------------------------\n");
3018
3019         printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3020         print_lock(hlock);
3021
3022         printk("\nbut this task is not holding:\n");
3023         printk("%s\n", hlock->nest_lock->name);
3024
3028         printk("\nother info that might help us debug this:\n");
3029         lockdep_print_held_locks(curr);
3030
3031         printk("\nstack backtrace:\n");
3032         dump_stack();
3033
3034         return 0;
3035 }
3036
3037 static int __lock_is_held(struct lockdep_map *lock);
3038
3039 /*
3040  * This gets called for every mutex_lock*()/spin_lock*() operation.
3041  * We maintain the dependency maps and validate the locking attempt:
3042  */
3043 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3044                           int trylock, int read, int check, int hardirqs_off,
3045                           struct lockdep_map *nest_lock, unsigned long ip,
3046                           int references)
3047 {
3048         struct task_struct *curr = current;
3049         struct lock_class *class = NULL;
3050         struct held_lock *hlock;
3051         unsigned int depth, id;
3052         int chain_head = 0;
3053         int class_idx;
3054         u64 chain_key;
3055
3056         if (!prove_locking)
3057                 check = 1;
3058
3059         if (unlikely(!debug_locks))
3060                 return 0;
3061
3062         /*
3063          * Lockdep should run with IRQs disabled, otherwise we could
3064          * get an interrupt which would want to take locks, which would
3065          * end up in lockdep and have you got a head-ache already?
3066          */
3067         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3068                 return 0;
3069
3070         if (lock->key == &__lockdep_no_validate__)
3071                 check = 1;
3072
3073         if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3074                 class = lock->class_cache[subclass];
3075         /*
3076          * Not cached?
3077          */
3078         if (unlikely(!class)) {
3079                 class = register_lock_class(lock, subclass, 0);
3080                 if (!class)
3081                         return 0;
3082         }
3083         atomic_inc((atomic_t *)&class->ops);
3084         if (very_verbose(class)) {
3085                 printk("\nacquire class [%p] %s", class->key, class->name);
3086                 if (class->name_version > 1)
3087                         printk("#%d", class->name_version);
3088                 printk("\n");
3089                 dump_stack();
3090         }
3091
3092         /*
3093          * Add the lock to the list of currently held locks.
3094          * (we dont increase the depth just yet, up until the
3095          * dependency checks are done)
3096          */
3097         depth = curr->lockdep_depth;
3098         /*
3099          * Ran out of static storage for our per-task lock stack again have we?
3100          */
3101         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3102                 return 0;
3103
3104         class_idx = class - lock_classes + 1;
3105
3106         if (depth) {
3107                 hlock = curr->held_locks + depth - 1;
3108                 if (hlock->class_idx == class_idx && nest_lock) {
3109                         if (hlock->references)
3110                                 hlock->references++;
3111                         else
3112                                 hlock->references = 2;
3113
3114                         return 1;
3115                 }
3116         }
3117
3118         hlock = curr->held_locks + depth;
3119         /*
3120          * Plain impossible, we just registered it and checked it weren't no
3121          * NULL like.. I bet this mushroom I ate was good!
3122          */
3123         if (DEBUG_LOCKS_WARN_ON(!class))
3124                 return 0;
3125         hlock->class_idx = class_idx;
3126         hlock->acquire_ip = ip;
3127         hlock->instance = lock;
3128         hlock->nest_lock = nest_lock;
3129         hlock->trylock = trylock;
3130         hlock->read = read;
3131         hlock->check = check;
3132         hlock->hardirqs_off = !!hardirqs_off;
3133         hlock->references = references;
3134 #ifdef CONFIG_LOCK_STAT
3135         hlock->waittime_stamp = 0;
3136         hlock->holdtime_stamp = lockstat_clock();
3137 #endif
3138
3139         if (check == 2 && !mark_irqflags(curr, hlock))
3140                 return 0;
3141
3142         /* mark it as used: */
3143         if (!mark_lock(curr, hlock, LOCK_USED))
3144                 return 0;
3145
3146         /*
3147          * Calculate the chain hash: it's the combined hash of all the
3148          * lock keys along the dependency chain. We save the hash value
3149          * at every step so that we can get the current hash easily
3150          * after unlock. The chain hash is then used to cache dependency
3151          * results.
3152          *
3153          * The 'key ID' (the class index) is the most compact value we can
3154          * use to drive the hash, rather than class->key itself.
3155          */
3156         id = class - lock_classes;
3157         /*
3158          * Whoops, we did it again.. ran straight out of our static allocation.
3159          */
3160         if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
3161                 return 0;
3162
3163         chain_key = curr->curr_chain_key;
3164         if (!depth) {
3165                 /*
3166                  * How can we have a chain hash when we hold no locks yet?
3167                  */
3168                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3169                         return 0;
3170                 chain_head = 1;
3171         }
3172
3173         hlock->prev_chain_key = chain_key;
3174         if (separate_irq_context(curr, hlock)) {
3175                 chain_key = 0;
3176                 chain_head = 1;
3177         }
3178         chain_key = iterate_chain_key(chain_key, id);
3179
3180         if (nest_lock && !__lock_is_held(nest_lock))
3181                 return print_lock_nested_lock_not_held(curr, hlock, ip);
3182
3183         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3184                 return 0;
3185
3186         curr->curr_chain_key = chain_key;
3187         curr->lockdep_depth++;
3188         check_chain_key(curr);
3189 #ifdef CONFIG_DEBUG_LOCKDEP
3190         if (unlikely(!debug_locks))
3191                 return 0;
3192 #endif
3193         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3194                 debug_locks_off();
3195                 printk("BUG: MAX_LOCK_DEPTH too low, depth: %i  max: %lu!\n",
3196                        curr->lockdep_depth, MAX_LOCK_DEPTH);
3197                 printk("turning off the locking correctness validator.\n");
3198                 printk("Attach output of /proc/lock_stat to bug report\n");
3199
3200                 lockdep_print_held_locks(current);
3201                 debug_show_all_locks();
3202                 dump_stack();
3203
3204                 return 0;
3205         }
3206
3207         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3208                 max_lockdep_depth = curr->lockdep_depth;
3209
3210         return 1;
3211 }
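
/*
 * Illustrative sketch of the nest_lock/references path above ('outer',
 * 'a' and 'b' are made-up locks; 'a' and 'b' are assumed to share one
 * lock class): the 2nd..Nth same-class acquisition under a nest_lock
 * only bumps hlock->references instead of consuming another held_lock
 * slot, which is how callers that take very many same-class locks stay
 * below MAX_LOCK_DEPTH.
 */
static __maybe_unused void
lockdep_nest_lock_sketch(struct mutex *outer, struct mutex *a, struct mutex *b)
{
        mutex_lock(outer);
        mutex_lock_nest_lock(a, outer);         /* new held_lock entry */
        mutex_lock_nest_lock(b, outer);         /* same class: references++ only */
        mutex_unlock(b);
        mutex_unlock(a);
        mutex_unlock(outer);
}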
3212
3213 static int
3214 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3215                            unsigned long ip)
3216 {
3217         if (!debug_locks_off())
3218                 return 0;
3219         if (debug_locks_silent)
3220                 return 0;
3221
3222         printk("\n");
3223         printk("=====================================\n");
3224         printk("[ BUG: bad unlock balance detected! ]\n");
3225         print_kernel_ident();
3226         printk("-------------------------------------\n");
3227         printk("%s/%d is trying to release lock (",
3228                 curr->comm, task_pid_nr(curr));
3229         print_lockdep_cache(lock);
3230         printk(") at:\n");
3231         print_ip_sym(ip);
3232         printk("but there are no more locks to release!\n");
3233         printk("\nother info that might help us debug this:\n");
3234         lockdep_print_held_locks(curr);
3235
3236         printk("\nstack backtrace:\n");
3237         dump_stack();
3238
3239         return 0;
3240 }
3241
3242 /*
3243  * Common debugging checks for both nested and non-nested unlock:
3244  */
3245 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
3246                         unsigned long ip)
3247 {
3248         if (unlikely(!debug_locks))
3249                 return 0;
3250         /*
3251          * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
3252          */
3253         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3254                 return 0;
3255
3256         if (curr->lockdep_depth <= 0)
3257                 return print_unlock_imbalance_bug(curr, lock, ip);
3258
3259         return 1;
3260 }
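
/*
 * Illustrative caller-side sketch (not from this file): the imbalance
 * report above fires when a release has no matching acquire, e.g. a
 * double unlock:
 *
 *      spin_lock(&lock);
 *      spin_unlock(&lock);
 *      spin_unlock(&lock);     <-- "BUG: bad unlock balance detected!"
 *
 * check_unlock() catches the case where no locks are held at all;
 * match_held_lock() below catches releasing a lock that is not on the
 * held-lock stack while other locks are.
 */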
3261
3262 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3263 {
3264         if (hlock->instance == lock)
3265                 return 1;
3266
3267         if (hlock->references) {
3268                 struct lock_class *class = lock->class_cache[0];
3269
3270                 if (!class)
3271                         class = look_up_lock_class(lock, 0);
3272
3273                 /*
3274                  * If look_up_lock_class() failed to find a class, we're trying
3275                  * to test if we hold a lock that has never yet been acquired.
3276                  * Clearly if the lock hasn't been acquired _ever_, we're not
3277                  * holding it either, so report failure.
3278                  */
3279                 if (!class)
3280                         return 0;
3281
3282                 /*
3283                  * References, but not a lock we're actually ref-counting?
3284                  * State got messed up, follow the sites that change ->references
3285                  * and try to make sense of it.
3286                  */
3287                 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3288                         return 0;
3289
3290                 if (hlock->class_idx == class - lock_classes + 1)
3291                         return 1;
3292         }
3293
3294         return 0;
3295 }
3296
3297 static int
3298 __lock_set_class(struct lockdep_map *lock, const char *name,
3299                  struct lock_class_key *key, unsigned int subclass,
3300                  unsigned long ip)
3301 {
3302         struct task_struct *curr = current;
3303         struct held_lock *hlock, *prev_hlock;
3304         struct lock_class *class;
3305         unsigned int depth;
3306         int i;
3307
3308         depth = curr->lockdep_depth;
3309         /*
3310          * This function is about (re)setting the class of a held lock,
3311          * yet we're not actually holding any locks. Naughty user!
3312          */
3313         if (DEBUG_LOCKS_WARN_ON(!depth))
3314                 return 0;
3315
3316         prev_hlock = NULL;
3317         for (i = depth-1; i >= 0; i--) {
3318                 hlock = curr->held_locks + i;
3319                 /*
3320                  * We must not cross into another context:
3321                  */
3322                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3323                         break;
3324                 if (match_held_lock(hlock, lock))
3325                         goto found_it;
3326                 prev_hlock = hlock;
3327         }
3328         return print_unlock_imbalance_bug(curr, lock, ip);
3329
3330 found_it:
3331         lockdep_init_map(lock, name, key, 0);
3332         class = register_lock_class(lock, subclass, 0);
3333         hlock->class_idx = class - lock_classes + 1;
3334
3335         curr->lockdep_depth = i;
3336         curr->curr_chain_key = hlock->prev_chain_key;
3337
3338         for (; i < depth; i++) {
3339                 hlock = curr->held_locks + i;
3340                 if (!__lock_acquire(hlock->instance,
3341                         hlock_class(hlock)->subclass, hlock->trylock,
3342                                 hlock->read, hlock->check, hlock->hardirqs_off,
3343                                 hlock->nest_lock, hlock->acquire_ip,
3344                                 hlock->references))
3345                         return 0;
3346         }
3347
3348         /*
3349          * We rebuilt the held-lock stack above; if the depth changed we
3350          * lost (or duplicated) an entry along the way.
3351          */
3352         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3353                 return 0;
3354         return 1;
3355 }
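
/*
 * Minimal sketch of how __lock_set_class() is reached (the map and key
 * names are made up): a caller that re-purposes an object it has locked
 * can move the lock to a new class while holding it; the held-lock
 * stack is then re-validated against the new class by the loop above.
 */
static __maybe_unused void lock_set_class_sketch(struct lockdep_map *map)
{
        static struct lock_class_key reclassified_key;

        /* caller must currently hold the lock that 'map' annotates */
        lock_set_class(map, "reclassified", &reclassified_key, 0, _THIS_IP_);
}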
3356
3357 /*
3358  * Remove a lock from the list of currently held locks in a
3359  * potentially non-nested (out of order) manner. This is a
3360  * relatively rare operation, as all the unlock APIs default
3361  * to nested mode (which uses lock_release()):
3362  */
3363 static int
3364 lock_release_non_nested(struct task_struct *curr,
3365                         struct lockdep_map *lock, unsigned long ip)
3366 {
3367         struct held_lock *hlock, *prev_hlock;
3368         unsigned int depth;
3369         int i;
3370
3371         /*
3372          * Check whether the lock exists in the current stack
3373          * of held locks:
3374          */
3375         depth = curr->lockdep_depth;
3376         /*
3377          * We are asked to release a lock, yet this task does not hold
3378          * any locks at all - the caller is confused.
3379          */
3380         if (DEBUG_LOCKS_WARN_ON(!depth))
3381                 return 0;
3382
3383         prev_hlock = NULL;
3384         for (i = depth-1; i >= 0; i--) {
3385                 hlock = curr->held_locks + i;
3386                 /*
3387                  * We must not cross into another context:
3388                  */
3389                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3390                         break;
3391                 if (match_held_lock(hlock, lock))
3392                         goto found_it;
3393                 prev_hlock = hlock;
3394         }
3395         return print_unlock_imbalance_bug(curr, lock, ip);
3396
3397 found_it:
3398         if (hlock->instance == lock)
3399                 lock_release_holdtime(hlock);
3400
3401         if (hlock->references) {
3402                 hlock->references--;
3403                 if (hlock->references) {
3404                         /*
3405                          * We had, and after removing one, still have
3406                          * references, the current lock stack is still
3407                          * valid. We're done!
3408                          */
3409                         return 1;
3410                 }
3411         }
3412
3413         /*
3414          * We have the right lock to unlock, 'hlock' points to it.
3415          * Now we remove it from the stack, and add back the other
3416          * entries (if any), recalculating the hash along the way:
3417          */
3418
3419         curr->lockdep_depth = i;
3420         curr->curr_chain_key = hlock->prev_chain_key;
3421
3422         for (i++; i < depth; i++) {
3423                 hlock = curr->held_locks + i;
3424                 if (!__lock_acquire(hlock->instance,
3425                         hlock_class(hlock)->subclass, hlock->trylock,
3426                                 hlock->read, hlock->check, hlock->hardirqs_off,
3427                                 hlock->nest_lock, hlock->acquire_ip,
3428                                 hlock->references))
3429                         return 0;
3430         }
3431
3432         /*
3433          * We released exactly one lock, so the depth must now be exactly
3434          * one less than before - anything else is a bug.
3435          */
3436         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3437                 return 0;
3438         return 1;
3439 }
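
/*
 * Illustrative sketch ('a' and 'b' are made-up locks): releases do not
 * have to be in LIFO order - releasing a lock from the middle of the
 * stack takes the non-nested path above, which re-acquires (from
 * lockdep's point of view) the entries stacked on top of it.
 */
static __maybe_unused void out_of_order_release_sketch(struct mutex *a,
                                                       struct mutex *b)
{
        mutex_lock(a);
        mutex_lock(b);
        mutex_unlock(a);        /* 'a' is not on top: non-nested release */
        mutex_unlock(b);
}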
3440
3441 /*
3442  * Remove a lock from the list of currently held locks - this gets
3443  * called on mutex_unlock()/spin_unlock*() (or on a failed
3444  * mutex_lock_interruptible()). This is done for unlocks that nest
3445  * perfectly. (i.e. the current top of the lock-stack is unlocked)
3446  */
3447 static int lock_release_nested(struct task_struct *curr,
3448                                struct lockdep_map *lock, unsigned long ip)
3449 {
3450         struct held_lock *hlock;
3451         unsigned int depth;
3452
3453         /*
3454          * Pop off the top of the lock stack:
3455          */
3456         depth = curr->lockdep_depth - 1;
3457         hlock = curr->held_locks + depth;
3458
3459         /*
3460          * Is the unlock non-nested:
3461          */
3462         if (hlock->instance != lock || hlock->references)
3463                 return lock_release_non_nested(curr, lock, ip);
3464         curr->lockdep_depth--;
3465
3466         /*
3467          * No more locks, but somehow we've got hash left over, who left it?
3468          */
3469         if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
3470                 return 0;
3471
3472         curr->curr_chain_key = hlock->prev_chain_key;
3473
3474         lock_release_holdtime(hlock);
3475
3476 #ifdef CONFIG_DEBUG_LOCKDEP
3477         hlock->prev_chain_key = 0;
3478         hlock->class_idx = 0;
3479         hlock->acquire_ip = 0;
3480         hlock->irq_context = 0;
3481 #endif
3482         return 1;
3483 }
3484
3485 /*
3486  * Remove a lock from the list of currently held locks - this gets
3487  * called on mutex_unlock()/spin_unlock*() (or on a failed
3488  * mutex_lock_interruptible()). The unlock may nest perfectly (the common
3489  * case: the top of the lock-stack is released) or be out of order.
3490  */
3491 static void
3492 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3493 {
3494         struct task_struct *curr = current;
3495
3496         if (!check_unlock(curr, lock, ip))
3497                 return;
3498
3499         if (nested) {
3500                 if (!lock_release_nested(curr, lock, ip))
3501                         return;
3502         } else {
3503                 if (!lock_release_non_nested(curr, lock, ip))
3504                         return;
3505         }
3506
3507         check_chain_key(curr);
3508 }
3509
3510 static int __lock_is_held(struct lockdep_map *lock)
3511 {
3512         struct task_struct *curr = current;
3513         int i;
3514
3515         for (i = 0; i < curr->lockdep_depth; i++) {
3516                 struct held_lock *hlock = curr->held_locks + i;
3517
3518                 if (match_held_lock(hlock, lock))
3519                         return 1;
3520         }
3521
3522         return 0;
3523 }
3524
3525 /*
3526  * Check whether we follow the irq-flags state precisely:
3527  */
3528 static void check_flags(unsigned long flags)
3529 {
3530 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3531     defined(CONFIG_TRACE_IRQFLAGS)
3532         if (!debug_locks)
3533                 return;
3534
3535         if (irqs_disabled_flags(flags)) {
3536                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3537                         printk("possible reason: unannotated irqs-off.\n");
3538                 }
3539         } else {
3540                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3541                         printk("possible reason: unannotated irqs-on.\n");
3542                 }
3543         }
3544
3545         /*
3546          * We dont accurately track softirq state in e.g.
3547          * hardirq contexts (such as on 4KSTACKS), so only
3548          * check if not in hardirq contexts:
3549          */
3550         if (!hardirq_count()) {
3551                 if (softirq_count()) {
3552                         /* like the hardirq checks above, but for softirq context */
3553                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3554                 } else {
3555                         /* like the above, but softirqs should be marked enabled here */
3556                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3557                 }
3558         }
3559
3560         if (!debug_locks)
3561                 print_irqtrace_events(current);
3562 #endif
3563 }
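
/*
 * Illustrative caller-side sketch: the "unannotated irqs-off" hint
 * above typically means interrupts were disabled behind lockdep's
 * back, e.g.:
 *
 *      raw_local_irq_disable();        <-- no trace_hardirqs_off() call
 *      spin_lock(&lock);               <-- check_flags() sees the mismatch
 *
 * whereas local_irq_disable()/local_irq_save() keep
 * current->hardirqs_enabled in sync through the irq-flags tracing hooks.
 */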
3564
3565 void lock_set_class(struct lockdep_map *lock, const char *name,
3566                     struct lock_class_key *key, unsigned int subclass,
3567                     unsigned long ip)
3568 {
3569         unsigned long flags;
3570
3571         if (unlikely(current->lockdep_recursion))
3572                 return;
3573
3574         raw_local_irq_save(flags);
3575         current->lockdep_recursion = 1;
3576         check_flags(flags);
3577         if (__lock_set_class(lock, name, key, subclass, ip))
3578                 check_chain_key(current);
3579         current->lockdep_recursion = 0;
3580         raw_local_irq_restore(flags);
3581 }
3582 EXPORT_SYMBOL_GPL(lock_set_class);
3583
3584 /*
3585  * We are not always called with irqs disabled - do that here,
3586  * and also avoid lockdep recursion:
3587  */
3588 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3589                           int trylock, int read, int check,
3590                           struct lockdep_map *nest_lock, unsigned long ip)
3591 {
3592         unsigned long flags;
3593
3594         if (unlikely(current->lockdep_recursion))
3595                 return;
3596
3597         raw_local_irq_save(flags);
3598         check_flags(flags);
3599
3600         current->lockdep_recursion = 1;
3601         trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3602         __lock_acquire(lock, subclass, trylock, read, check,
3603                        irqs_disabled_flags(flags), nest_lock, ip, 0);
3604         current->lockdep_recursion = 0;
3605         raw_local_irq_restore(flags);
3606 }
3607 EXPORT_SYMBOL_GPL(lock_acquire);
3608
3609 void lock_release(struct lockdep_map *lock, int nested,
3610                           unsigned long ip)
3611 {
3612         unsigned long flags;
3613
3614         if (unlikely(current->lockdep_recursion))
3615                 return;
3616
3617         raw_local_irq_save(flags);
3618         check_flags(flags);
3619         current->lockdep_recursion = 1;
3620         trace_lock_release(lock, ip);
3621         __lock_release(lock, nested, ip);
3622         current->lockdep_recursion = 0;
3623         raw_local_irq_restore(flags);
3624 }
3625 EXPORT_SYMBOL_GPL(lock_release);
3626
3627 int lock_is_held(struct lockdep_map *lock)
3628 {
3629         unsigned long flags;
3630         int ret = 0;
3631
3632         if (unlikely(current->lockdep_recursion))
3633                 return 1; /* avoid false negative lockdep_assert_held() */
3634
3635         raw_local_irq_save(flags);
3636         check_flags(flags);
3637
3638         current->lockdep_recursion = 1;
3639         ret = __lock_is_held(lock);
3640         current->lockdep_recursion = 0;
3641         raw_local_irq_restore(flags);
3642
3643         return ret;
3644 }
3645 EXPORT_SYMBOL_GPL(lock_is_held);
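
/*
 * Minimal sketch of the intended use (names are made up): lock_is_held()
 * is what backs lockdep_is_held()/lockdep_assert_held(), the usual way
 * for code to document and verify its locking preconditions.
 */
static __maybe_unused void lock_is_held_sketch(struct mutex *lock, int *counter)
{
        lockdep_assert_held(lock);      /* warns if 'lock' is not held */
        (*counter)++;
}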
3646
3647 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3648 {
3649         current->lockdep_reclaim_gfp = gfp_mask;
3650 }
3651
3652 void lockdep_clear_current_reclaim_state(void)
3653 {
3654         current->lockdep_reclaim_gfp = 0;
3655 }
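
/*
 * Sketch of the intended calling pattern (the real caller is the page
 * allocator's direct-reclaim path): marking the task as being inside
 * reclaim lets lockdep record locks taken here as used-in-reclaim, so
 * it can later report a GFP_FS allocation made while such a lock is
 * held.
 */
static __maybe_unused void reclaim_state_sketch(gfp_t gfp_mask)
{
        lockdep_set_current_reclaim_state(gfp_mask);
        /* ... run direct reclaim, possibly taking fs/io locks ... */
        lockdep_clear_current_reclaim_state();
}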
3656
3657 #ifdef CONFIG_LOCK_STAT
3658 static int
3659 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3660                            unsigned long ip)
3661 {
3662         if (!debug_locks_off())
3663                 return 0;
3664         if (debug_locks_silent)
3665                 return 0;
3666
3667         printk("\n");
3668         printk("=================================\n");
3669         printk("[ BUG: bad contention detected! ]\n");
3670         print_kernel_ident();
3671         printk("---------------------------------\n");
3672         printk("%s/%d is trying to contend lock (",
3673                 curr->comm, task_pid_nr(curr));
3674         print_lockdep_cache(lock);
3675         printk(") at:\n");
3676         print_ip_sym(ip);
3677         printk("but there are no locks held!\n");
3678         printk("\nother info that might help us debug this:\n");
3679         lockdep_print_held_locks(curr);
3680
3681         printk("\nstack backtrace:\n");
3682         dump_stack();
3683
3684         return 0;
3685 }
3686
3687 static void
3688 __lock_contended(struct lockdep_map *lock, unsigned long ip)
3689 {
3690         struct task_struct *curr = current;
3691         struct held_lock *hlock, *prev_hlock;
3692         struct lock_class_stats *stats;
3693         unsigned int depth;
3694         int i, contention_point, contending_point;
3695
3696         depth = curr->lockdep_depth;
3697         /*
3698          * Contention was reported on this lock, yet this task holds no
3699          * locks at all - the acquisition was never recorded.
3700          */
3701         if (DEBUG_LOCKS_WARN_ON(!depth))
3702                 return;
3703
3704         prev_hlock = NULL;
3705         for (i = depth-1; i >= 0; i--) {
3706                 hlock = curr->held_locks + i;
3707                 /*
3708                  * We must not cross into another context:
3709                  */
3710                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3711                         break;
3712                 if (match_held_lock(hlock, lock))
3713                         goto found_it;
3714                 prev_hlock = hlock;
3715         }
3716         print_lock_contention_bug(curr, lock, ip);
3717         return;
3718
3719 found_it:
3720         if (hlock->instance != lock)
3721                 return;
3722
3723         hlock->waittime_stamp = lockstat_clock();
3724
3725         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3726         contending_point = lock_point(hlock_class(hlock)->contending_point,
3727                                       lock->ip);
3728
3729         stats = get_lock_stats(hlock_class(hlock));
3730         if (contention_point < LOCKSTAT_POINTS)
3731                 stats->contention_point[contention_point]++;
3732         if (contending_point < LOCKSTAT_POINTS)
3733                 stats->contending_point[contending_point]++;
3734         if (lock->cpu != smp_processor_id())
3735                 stats->bounces[bounce_contended + !!hlock->read]++;
3736         put_lock_stats(stats);
3737 }
3738
3739 static void
3740 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3741 {
3742         struct task_struct *curr = current;
3743         struct held_lock *hlock, *prev_hlock;
3744         struct lock_class_stats *stats;
3745         unsigned int depth;
3746         u64 now, waittime = 0;
3747         int i, cpu;
3748
3749         depth = curr->lockdep_depth;
3750         /*
3751          * We supposedly acquired this lock, yet this task holds no
3752          * locks at all - the acquisition was never annotated.
3753          */
3754         if (DEBUG_LOCKS_WARN_ON(!depth))
3755                 return;
3756
3757         prev_hlock = NULL;
3758         for (i = depth-1; i >= 0; i--) {
3759                 hlock = curr->held_locks + i;
3760                 /*
3761                  * We must not cross into another context:
3762                  */
3763                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3764                         break;
3765                 if (match_held_lock(hlock, lock))
3766                         goto found_it;
3767                 prev_hlock = hlock;
3768         }
3769         print_lock_contention_bug(curr, lock, _RET_IP_);
3770         return;
3771
3772 found_it:
3773         if (hlock->instance != lock)
3774                 return;
3775
3776         cpu = smp_processor_id();
3777         if (hlock->waittime_stamp) {
3778                 now = lockstat_clock();
3779                 waittime = now - hlock->waittime_stamp;
3780                 hlock->holdtime_stamp = now;
3781         }
3782
3783         trace_lock_acquired(lock, ip);
3784
3785         stats = get_lock_stats(hlock_class(hlock));
3786         if (waittime) {
3787                 if (hlock->read)
3788                         lock_time_inc(&stats->read_waittime, waittime);
3789                 else
3790                         lock_time_inc(&stats->write_waittime, waittime);
3791         }
3792         if (lock->cpu != cpu)
3793                 stats->bounces[bounce_acquired + !!hlock->read]++;
3794         put_lock_stats(stats);
3795
3796         lock->cpu = cpu;
3797         lock->ip = ip;
3798 }
3799
3800 void lock_contended(struct lockdep_map *lock, unsigned long ip)
3801 {
3802         unsigned long flags;
3803
3804         if (unlikely(!lock_stat))
3805                 return;
3806
3807         if (unlikely(current->lockdep_recursion))
3808                 return;
3809
3810         raw_local_irq_save(flags);
3811         check_flags(flags);
3812         current->lockdep_recursion = 1;
3813         trace_lock_contended(lock, ip);
3814         __lock_contended(lock, ip);
3815         current->lockdep_recursion = 0;
3816         raw_local_irq_restore(flags);
3817 }
3818 EXPORT_SYMBOL_GPL(lock_contended);
3819
3820 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3821 {
3822         unsigned long flags;
3823
3824         if (unlikely(!lock_stat))
3825                 return;
3826
3827         if (unlikely(current->lockdep_recursion))
3828                 return;
3829
3830         raw_local_irq_save(flags);
3831         check_flags(flags);
3832         current->lockdep_recursion = 1;
3833         __lock_acquired(lock, ip);
3834         current->lockdep_recursion = 0;
3835         raw_local_irq_restore(flags);
3836 }
3837 EXPORT_SYMBOL_GPL(lock_acquired);
3838 #endif
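
/*
 * Illustrative sketch of the annotation pattern that feeds the lock_stat
 * code above (this is essentially what the LOCK_CONTENDED() helper in
 * <linux/lockdep.h> and the mutex/rwsem slowpaths do; the function and
 * its trylock callback are made up): contention is reported once when
 * the waiter starts waiting, and the acquisition is reported when the
 * lock is finally obtained, yielding the wait-time and bounce stats.
 */
static __maybe_unused void lock_stat_sketch(struct lockdep_map *map,
                                            int (*trylock)(void *), void *lock)
{
        if (trylock(lock))
                goto acquired;

        lock_contended(map, _RET_IP_);          /* start of the wait */
        while (!trylock(lock))
                cpu_relax();
acquired:
        lock_acquired(map, _RET_IP_);           /* wait is over, record times */
}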
3839
3840 /*
3841  * Used by the testsuite, sanitize the validator state
3842  * after a simulated failure:
3843  */
3844
3845 void lockdep_reset(void)
3846 {
3847         unsigned long flags;
3848         int i;
3849
3850         raw_local_irq_save(flags);
3851         current->curr_chain_key = 0;
3852         current->lockdep_depth = 0;
3853         current->lockdep_recursion = 0;
3854         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3855         nr_hardirq_chains = 0;
3856         nr_softirq_chains = 0;
3857         nr_process_chains = 0;
3858         debug_locks = 1;
3859         for (i = 0; i < CHAINHASH_SIZE; i++)
3860                 INIT_LIST_HEAD(chainhash_table + i);
3861         raw_local_irq_restore(flags);
3862 }
3863
3864 static void zap_class(struct lock_class *class)
3865 {
3866         int i;
3867
3868         /*
3869          * Remove all dependencies this lock is
3870          * involved in:
3871          */
3872         for (i = 0; i < nr_list_entries; i++) {
3873                 if (list_entries[i].class == class)
3874                         list_del_rcu(&list_entries[i].entry);
3875         }
3876         /*
3877          * Unhash the class and remove it from the all_lock_classes list:
3878          */
3879         list_del_rcu(&class->hash_entry);
3880         list_del_rcu(&class->lock_entry);
3881
3882         class->key = NULL;
3883 }
3884
3885 static inline int within(const void *addr, void *start, unsigned long size)
3886 {
3887         return addr >= start && addr < start + size;
3888 }
3889
3890 void lockdep_free_key_range(void *start, unsigned long size)
3891 {
3892         struct lock_class *class, *next;
3893         struct list_head *head;
3894         unsigned long flags;
3895         int i;
3896         int locked;
3897
3898         raw_local_irq_save(flags);
3899         locked = graph_lock();
3900
3901         /*
3902          * Unhash all classes that were created by this module:
3903          */
3904         for (i = 0; i < CLASSHASH_SIZE; i++) {
3905                 head = classhash_table + i;
3906                 if (list_empty(head))
3907                         continue;
3908                 list_for_each_entry_safe(class, next, head, hash_entry) {
3909                         if (within(class->key, start, size))
3910                                 zap_class(class);
3911                         else if (within(class->name, start, size))
3912                                 zap_class(class);
3913                 }
3914         }
3915
3916         if (locked)
3917                 graph_unlock();
3918         raw_local_irq_restore(flags);
3919 }
3920
3921 void lockdep_reset_lock(struct lockdep_map *lock)
3922 {
3923         struct lock_class *class, *next;
3924         struct list_head *head;
3925         unsigned long flags;
3926         int i, j;
3927         int locked;
3928
3929         raw_local_irq_save(flags);
3930
3931         /*
3932          * Remove all classes this lock might have:
3933          */
3934         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3935                 /*
3936                  * If the class exists we look it up and zap it:
3937                  */
3938                 class = look_up_lock_class(lock, j);
3939                 if (class)
3940                         zap_class(class);
3941         }
3942         /*
3943          * Debug check: in the end all mapped classes should
3944          * be gone.
3945          */
3946         locked = graph_lock();
3947         for (i = 0; i < CLASSHASH_SIZE; i++) {
3948                 head = classhash_table + i;
3949                 if (list_empty(head))
3950                         continue;
3951                 list_for_each_entry_safe(class, next, head, hash_entry) {
3952                         int match = 0;
3953
3954                         for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
3955                                 match |= class == lock->class_cache[j];
3956
3957                         if (unlikely(match)) {
3958                                 if (debug_locks_off_graph_unlock()) {
3959                                         /*
3960                                          * We just zapped all of this lock's classes, how can the cache still match one?
3961                                          */
3962                                         WARN_ON(1);
3963                                 }
3964                                 goto out_restore;
3965                         }
3966                 }
3967         }
3968         if (locked)
3969                 graph_unlock();
3970
3971 out_restore:
3972         raw_local_irq_restore(flags);
3973 }
3974
3975 void lockdep_init(void)
3976 {
3977         int i;
3978
3979         /*
3980          * Some architectures have their own start_kernel()
3981          * code which calls lockdep_init(), while we also
3982          * call lockdep_init() from start_kernel() itself,
3983          * and we want to initialize the hashes only once:
3984          */
3985         if (lockdep_initialized)
3986                 return;
3987
3988         for (i = 0; i < CLASSHASH_SIZE; i++)
3989                 INIT_LIST_HEAD(classhash_table + i);
3990
3991         for (i = 0; i < CHAINHASH_SIZE; i++)
3992                 INIT_LIST_HEAD(chainhash_table + i);
3993
3994         lockdep_initialized = 1;
3995 }
3996
3997 void __init lockdep_info(void)
3998 {
3999         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4000
4001         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
4002         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
4003         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
4004         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
4005         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
4006         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
4007         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
4008
4009         printk(" memory used by lock dependency info: %lu kB\n",
4010                 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
4011                 sizeof(struct list_head) * CLASSHASH_SIZE +
4012                 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
4013                 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
4014                 sizeof(struct list_head) * CHAINHASH_SIZE
4015 #ifdef CONFIG_PROVE_LOCKING
4016                 + sizeof(struct circular_queue)
4017 #endif
4018                 ) / 1024
4019                 );
4020
4021         printk(" per task-struct memory footprint: %lu bytes\n",
4022                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
4023
4024 #ifdef CONFIG_DEBUG_LOCKDEP
4025         if (lockdep_init_error) {
4026                 printk("WARNING: lockdep init error! lock-%s was acquired"
4027                         " before lockdep_init\n", lock_init_error);
4028                 printk("Call stack leading to lockdep invocation was:\n");
4029                 print_stack_trace(&lockdep_init_trace, 0);
4030         }
4031 #endif
4032 }
4033
4034 static void
4035 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4036                      const void *mem_to, struct held_lock *hlock)
4037 {
4038         if (!debug_locks_off())
4039                 return;
4040         if (debug_locks_silent)
4041                 return;
4042
4043         printk("\n");
4044         printk("=========================\n");
4045         printk("[ BUG: held lock freed! ]\n");
4046         print_kernel_ident();
4047         printk("-------------------------\n");
4048         printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
4049                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4050         print_lock(hlock);
4051         lockdep_print_held_locks(curr);
4052
4053         printk("\nstack backtrace:\n");
4054         dump_stack();
4055 }
4056
4057 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4058                                 const void* lock_from, unsigned long lock_len)
4059 {
4060         return lock_from + lock_len <= mem_from ||
4061                 mem_from + mem_len <= lock_from;
4062 }
4063
4064 /*
4065  * Called when kernel memory is freed (or unmapped), or if a lock
4066  * is destroyed or reinitialized - this code checks whether there is
4067  * any held lock in the memory range [mem_from, mem_from + mem_len):
4068  */
4069 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4070 {
4071         struct task_struct *curr = current;
4072         struct held_lock *hlock;
4073         unsigned long flags;
4074         int i;
4075
4076         if (unlikely(!debug_locks))
4077                 return;
4078
4079         local_irq_save(flags);
4080         for (i = 0; i < curr->lockdep_depth; i++) {
4081                 hlock = curr->held_locks + i;
4082
4083                 if (not_in_range(mem_from, mem_len, hlock->instance,
4084                                         sizeof(*hlock->instance)))
4085                         continue;
4086
4087                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4088                 break;
4089         }
4090         local_irq_restore(flags);
4091 }
4092 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
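
/*
 * Illustrative caller-side sketch (made-up structure): the report above
 * fires when an object is freed while a lock embedded in it is still
 * held, e.g.:
 *
 *      spin_lock(&obj->lock);
 *      ...
 *      kfree(obj);             <-- "BUG: held lock freed!"
 *
 * Memory-freeing paths such as kfree() call debug_check_no_locks_freed()
 * with the range being freed, and any held lock inside that range is
 * reported.
 */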
4093
4094 static void print_held_locks_bug(struct task_struct *curr)
4095 {
4096         if (!debug_locks_off())
4097                 return;
4098         if (debug_locks_silent)
4099                 return;
4100
4101         printk("\n");
4102         printk("=====================================\n");
4103         printk("[ BUG: lock held at task exit time! ]\n");
4104         print_kernel_ident();
4105         printk("-------------------------------------\n");
4106         printk("%s/%d is exiting with locks still held!\n",
4107                 curr->comm, task_pid_nr(curr));
4108         lockdep_print_held_locks(curr);
4109
4110         printk("\nstack backtrace:\n");
4111         dump_stack();
4112 }
4113
4114 void debug_check_no_locks_held(struct task_struct *task)
4115 {
4116         if (unlikely(task->lockdep_depth > 0))
4117                 print_held_locks_bug(task);
4118 }
4119
4120 void debug_show_all_locks(void)
4121 {
4122         struct task_struct *g, *p;
4123         int count = 10;
4124         int unlock = 1;
4125
4126         if (unlikely(!debug_locks)) {
4127                 printk("INFO: lockdep is turned off.\n");
4128                 return;
4129         }
4130         printk("\nShowing all locks held in the system:\n");
4131
4132         /*
4133          * Here we try to get the tasklist_lock as hard as possible,
4134          * if not successful after 2 seconds we ignore it (but keep
4135          * trying). This is to enable a debug printout even if a
4136          * tasklist_lock-holding task deadlocks or crashes.
4137          */
4138 retry:
4139         if (!read_trylock(&tasklist_lock)) {
4140                 if (count == 10)
4141                         printk("hm, tasklist_lock locked, retrying... ");
4142                 if (count) {
4143                         count--;
4144                         printk(" #%d", 10-count);
4145                         mdelay(200);
4146                         goto retry;
4147                 }
4148                 printk(" ignoring it.\n");
4149                 unlock = 0;
4150         } else {
4151                 if (count != 10)
4152                         printk(KERN_CONT " locked it.\n");
4153         }
4154
4155         do_each_thread(g, p) {
4156                 /*
4157                  * Printing a task's held locks is only reliable
4158                  * if the task is sleeping or if it is the
4159                  * current task:
4160                  */
4161                 if (p->state == TASK_RUNNING && p != current)
4162                         continue;
4163                 if (p->lockdep_depth)
4164                         lockdep_print_held_locks(p);
4165                 if (!unlock)
4166                         if (read_trylock(&tasklist_lock))
4167                                 unlock = 1;
4168         } while_each_thread(g, p);
4169
4170         printk("\n");
4171         printk("=============================================\n\n");
4172
4173         if (unlock)
4174                 read_unlock(&tasklist_lock);
4175 }
4176 EXPORT_SYMBOL_GPL(debug_show_all_locks);
4177
4178 /*
4179  * Careful: only use this function if you are sure that
4180  * the task cannot run in parallel!
4181  */
4182 void debug_show_held_locks(struct task_struct *task)
4183 {
4184         if (unlikely(!debug_locks)) {
4185                 printk("INFO: lockdep is turned off.\n");
4186                 return;
4187         }
4188         lockdep_print_held_locks(task);
4189 }
4190 EXPORT_SYMBOL_GPL(debug_show_held_locks);
4191
4192 void lockdep_sys_exit(void)
4193 {
4194         struct task_struct *curr = current;
4195
4196         if (unlikely(curr->lockdep_depth)) {
4197                 if (!debug_locks_off())
4198                         return;
4199                 printk("\n");
4200                 printk("================================================\n");
4201                 printk("[ BUG: lock held when returning to user space! ]\n");
4202                 print_kernel_ident();
4203                 printk("------------------------------------------------\n");
4204                 printk("%s/%d is leaving the kernel with locks still held!\n",
4205                                 curr->comm, curr->pid);
4206                 lockdep_print_held_locks(curr);
4207         }
4208 }
4209
4210 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4211 {
4212         struct task_struct *curr = current;
4213
4214 #ifndef CONFIG_PROVE_RCU_REPEATEDLY
4215         if (!debug_locks_off())
4216                 return;
4217 #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
4218         /* Note: the following can be executed concurrently, so be careful. */
4219         printk("\n");
4220         printk("===============================\n");
4221         printk("[ INFO: suspicious RCU usage. ]\n");
4222         print_kernel_ident();
4223         printk("-------------------------------\n");
4224         printk("%s:%d %s!\n", file, line, s);
4225         printk("\nother info that might help us debug this:\n\n");
4226         printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4227                !rcu_lockdep_current_cpu_online()
4228                         ? "RCU used illegally from offline CPU!\n"
4229                         : rcu_is_cpu_idle()
4230                                 ? "RCU used illegally from idle CPU!\n"
4231                                 : "",
4232                rcu_scheduler_active, debug_locks);
4233
4234         /*
4235          * If a CPU is in the RCU-free window in idle (ie: in the section
4236          * between rcu_idle_enter() and rcu_idle_exit()), then RCU
4237          * considers that CPU to be in an "extended quiescent state",
4238          * which means that RCU will be completely ignoring that CPU.
4239          * Therefore, rcu_read_lock() and friends have absolutely no
4240          * effect on a CPU running in that state. In other words, even if
4241          * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4242          * delete data structures out from under it.  RCU really has no
4243          * choice here: we need to keep an RCU-free window in idle where
4244          * the CPU may possibly enter into low power mode. This way, CPUs
4245          * that started a grace period can notice the extended quiescent
4246          * state. Otherwise we would delay any grace period as long as we run
4247          * in the idle task.
4248          *
4249          * So complain bitterly if someone does call rcu_read_lock(),
4250          * rcu_read_lock_bh() and so on from extended quiescent states.
4251          */
4252         if (rcu_is_cpu_idle())
4253                 printk("RCU used illegally from extended quiescent state!\n");
4254
4255         lockdep_print_held_locks(curr);
4256         printk("\nstack backtrace:\n");
4257         dump_stack();
4258 }
4259 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
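
/*
 * Illustrative sketch of how this is reached ('gp' and 'gp_lock' are
 * made-up names): RCU's lockdep checks, e.g. the condition passed to
 * rcu_dereference_check(), end up here when they fail:
 *
 *      p = rcu_dereference_check(gp, lockdep_is_held(&gp_lock));
 *
 * If neither an RCU read-side critical section nor gp_lock protects
 * the access, rcu_lockdep_assert() calls lockdep_rcu_suspicious() and
 * the report above is printed.
 */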