1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e., if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #include <linux/mutex.h>
29 #include <linux/sched.h>
30 #include <linux/delay.h>
31 #include <linux/module.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/spinlock.h>
35 #include <linux/kallsyms.h>
36 #include <linux/interrupt.h>
37 #include <linux/stacktrace.h>
38 #include <linux/debug_locks.h>
39 #include <linux/irqflags.h>
40 #include <linux/utsname.h>
41 #include <linux/hash.h>
42 #include <linux/ftrace.h>
43
44 #include <asm/sections.h>
45
46 #include "lockdep_internals.h"
47
48 #ifdef CONFIG_PROVE_LOCKING
49 int prove_locking = 1;
50 module_param(prove_locking, int, 0644);
51 #else
52 #define prove_locking 0
53 #endif
54
55 #ifdef CONFIG_LOCK_STAT
56 int lock_stat = 1;
57 module_param(lock_stat, int, 0644);
58 #else
59 #define lock_stat 0
60 #endif
61
62 /*
63  * lockdep_lock: protects the lockdep graph, the hashes and the
64  *               class/list/hash allocators.
65  *
66  * This is one of the rare exceptions where it's justified
67  * to use a raw spinlock - we really don't want the spinlock
68  * code to recurse back into the lockdep code...
69  */
70 static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
71
72 static int graph_lock(void)
73 {
74         __raw_spin_lock(&lockdep_lock);
75         /*
76          * Make sure that if another CPU detected a bug while
77          * walking the graph we don't change it (while the other
78          * CPU is busy printing out stuff with the graph lock
79          * dropped already)
80          */
81         if (!debug_locks) {
82                 __raw_spin_unlock(&lockdep_lock);
83                 return 0;
84         }
85         /* prevent any recursions within lockdep from causing deadlocks */
86         current->lockdep_recursion++;
87         return 1;
88 }
89
90 static inline int graph_unlock(void)
91 {
92         if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
93                 return DEBUG_LOCKS_WARN_ON(1);
94
95         current->lockdep_recursion--;
96         __raw_spin_unlock(&lockdep_lock);
97         return 0;
98 }
99
100 /*
101  * Turn lock debugging off and return with 0 if it was off already,
102  * and also release the graph lock:
103  */
104 static inline int debug_locks_off_graph_unlock(void)
105 {
106         int ret = debug_locks_off();
107
108         __raw_spin_unlock(&lockdep_lock);
109
110         return ret;
111 }
112
113 static int lockdep_initialized;
114
115 unsigned long nr_list_entries;
116 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
117
118 /*
119  * All data structures here are protected by the global lockdep_lock.
120  *
121  * Mutex key structs only get allocated once, during bootup, and never
122  * get freed - this significantly simplifies the debugging code.
123  */
124 unsigned long nr_lock_classes;
125 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
126
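/*
 * held_lock::class_idx is stored 1-based so that 0 can mean "no class
 * set yet"; hence the -1 when indexing into lock_classes[] below.
 */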
127 static inline struct lock_class *hlock_class(struct held_lock *hlock)
128 {
129         if (!hlock->class_idx) {
130                 DEBUG_LOCKS_WARN_ON(1);
131                 return NULL;
132         }
133         return lock_classes + hlock->class_idx - 1;
134 }
135
136 #ifdef CONFIG_LOCK_STAT
137 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
138
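/*
 * Find the slot in points[] that already records @ip, or claim the first
 * empty slot for it. Returns the slot index (LOCKSTAT_POINTS if the array
 * is full), which selects the matching contention counter.
 */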
139 static int lock_point(unsigned long points[], unsigned long ip)
140 {
141         int i;
142
143         for (i = 0; i < LOCKSTAT_POINTS; i++) {
144                 if (points[i] == 0) {
145                         points[i] = ip;
146                         break;
147                 }
148                 if (points[i] == ip)
149                         break;
150         }
151
152         return i;
153 }
154
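/* Fold one measured time sample into a lock_time accumulator. */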
155 static void lock_time_inc(struct lock_time *lt, s64 time)
156 {
157         if (time > lt->max)
158                 lt->max = time;
159
160         if (time < lt->min || !lt->min)
161                 lt->min = time;
162
163         lt->total += time;
164         lt->nr++;
165 }
166
167 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
168 {
169         dst->min += src->min;
170         dst->max += src->max;
171         dst->total += src->total;
172         dst->nr += src->nr;
173 }
174
175 struct lock_class_stats lock_stats(struct lock_class *class)
176 {
177         struct lock_class_stats stats;
178         int cpu, i;
179
180         memset(&stats, 0, sizeof(struct lock_class_stats));
181         for_each_possible_cpu(cpu) {
182                 struct lock_class_stats *pcs =
183                         &per_cpu(lock_stats, cpu)[class - lock_classes];
184
185                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
186                         stats.contention_point[i] += pcs->contention_point[i];
187
188                 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
189                         stats.contending_point[i] += pcs->contending_point[i];
190
191                 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
192                 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
193
194                 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
195                 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
196
197                 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
198                         stats.bounces[i] += pcs->bounces[i];
199         }
200
201         return stats;
202 }
203
204 void clear_lock_stats(struct lock_class *class)
205 {
206         int cpu;
207
208         for_each_possible_cpu(cpu) {
209                 struct lock_class_stats *cpu_stats =
210                         &per_cpu(lock_stats, cpu)[class - lock_classes];
211
212                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
213         }
214         memset(class->contention_point, 0, sizeof(class->contention_point));
215         memset(class->contending_point, 0, sizeof(class->contending_point));
216 }
217
218 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
219 {
220         return &get_cpu_var(lock_stats)[class - lock_classes];
221 }
222
223 static void put_lock_stats(struct lock_class_stats *stats)
224 {
225         put_cpu_var(lock_stats);
226 }
227
228 static void lock_release_holdtime(struct held_lock *hlock)
229 {
230         struct lock_class_stats *stats;
231         s64 holdtime;
232
233         if (!lock_stat)
234                 return;
235
236         holdtime = sched_clock() - hlock->holdtime_stamp;
237
238         stats = get_lock_stats(hlock_class(hlock));
239         if (hlock->read)
240                 lock_time_inc(&stats->read_holdtime, holdtime);
241         else
242                 lock_time_inc(&stats->write_holdtime, holdtime);
243         put_lock_stats(stats);
244 }
245 #else
246 static inline void lock_release_holdtime(struct held_lock *hlock)
247 {
248 }
249 #endif
250
251 /*
252  * We keep a global list of all lock classes. The list only grows,
253  * never shrinks. The list is only accessed with the lockdep
254  * spinlock held.
255  */
256 LIST_HEAD(all_lock_classes);
257
258 /*
259  * The lockdep classes are in a hash-table as well, for fast lookup:
260  */
261 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
262 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
263 #define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
264 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
265
266 static struct list_head classhash_table[CLASSHASH_SIZE];
267
268 /*
269  * We put the lock dependency chains into a hash-table as well, to cache
270  * their existence:
271  */
272 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
273 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
274 #define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
275 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
276
277 static struct list_head chainhash_table[CHAINHASH_SIZE];
278
279 /*
280  * The hash key of the lock dependency chains is a hash itself too:
281  * it's a hash of all locks taken up to that lock, including that lock.
282  * It's a 64-bit hash, because it's important for the keys to be
283  * unique.
284  */
285 #define iterate_chain_key(key1, key2) \
286         (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
287         ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
288         (key2))
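/*
 * The expression above rotates the 64-bit chain key left by
 * MAX_LOCKDEP_KEYS_BITS and XORs in key2, so the resulting chain key
 * depends on both the set of held locks and the order they were taken in.
 */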
289
290 void lockdep_off(void)
291 {
292         current->lockdep_recursion++;
293 }
294
295 EXPORT_SYMBOL(lockdep_off);
296
297 void lockdep_on(void)
298 {
299         current->lockdep_recursion--;
300 }
301
302 EXPORT_SYMBOL(lockdep_on);
303
304 /*
305  * Debugging switches:
306  */
307
308 #define VERBOSE                 0
309 #define VERY_VERBOSE            0
310
311 #if VERBOSE
312 # define HARDIRQ_VERBOSE        1
313 # define SOFTIRQ_VERBOSE        1
314 #else
315 # define HARDIRQ_VERBOSE        0
316 # define SOFTIRQ_VERBOSE        0
317 #endif
318
319 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
320 /*
321  * Quick filtering for interesting events:
322  */
323 static int class_filter(struct lock_class *class)
324 {
325 #if 0
326         /* Example */
327         if (class->name_version == 1 &&
328                         !strcmp(class->name, "lockname"))
329                 return 1;
330         if (class->name_version == 1 &&
331                         !strcmp(class->name, "&struct->lockfield"))
332                 return 1;
333 #endif
334         /* Filter everything else. Return 1 to allow everything else. */
335         return 0;
336 }
337 #endif
338
339 static int verbose(struct lock_class *class)
340 {
341 #if VERBOSE
342         return class_filter(class);
343 #endif
344         return 0;
345 }
346
347 /*
348  * Stack-trace: tightly packed array of stack backtrace
349  * addresses. Protected by the graph_lock.
350  */
351 unsigned long nr_stack_trace_entries;
352 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
353
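/*
 * Append the current backtrace to the global stack_trace[] array (which
 * only ever grows) and point *trace at it. Expects the graph lock to be
 * held; on overflow the validator is turned off and 0 is returned.
 */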
354 static int save_trace(struct stack_trace *trace)
355 {
356         trace->nr_entries = 0;
357         trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
358         trace->entries = stack_trace + nr_stack_trace_entries;
359
360         trace->skip = 3;
361
362         save_stack_trace(trace);
363
364         trace->max_entries = trace->nr_entries;
365
366         nr_stack_trace_entries += trace->nr_entries;
367
368         if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
369                 if (!debug_locks_off_graph_unlock())
370                         return 0;
371
372                 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
373                 printk("turning off the locking correctness validator.\n");
374                 dump_stack();
375
376                 return 0;
377         }
378
379         return 1;
380 }
381
382 unsigned int nr_hardirq_chains;
383 unsigned int nr_softirq_chains;
384 unsigned int nr_process_chains;
385 unsigned int max_lockdep_depth;
386 unsigned int max_recursion_depth;
387
388 static unsigned int lockdep_dependency_gen_id;
389
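/*
 * Generation-count based "visited" marking for the graph walks below:
 * depth 0 starts a new walk (a new generation), and a class whose
 * dep_gen_id already matches the current generation has been visited.
 */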
390 static bool lockdep_dependency_visit(struct lock_class *source,
391                                      unsigned int depth)
392 {
393         if (!depth)
394                 lockdep_dependency_gen_id++;
395         if (source->dep_gen_id == lockdep_dependency_gen_id)
396                 return true;
397         source->dep_gen_id = lockdep_dependency_gen_id;
398         return false;
399 }
400
401 #ifdef CONFIG_DEBUG_LOCKDEP
402 /*
403  * We cannot printk in early bootup code. Even early_printk()
404  * might not work. So we mark any initialization errors and printk
405  * about it later on, in lockdep_info().
406  */
407 static int lockdep_init_error;
408 static unsigned long lockdep_init_trace_data[20];
409 static struct stack_trace lockdep_init_trace = {
410         .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
411         .entries = lockdep_init_trace_data,
412 };
413
414 /*
415  * Various lockdep statistics:
416  */
417 atomic_t chain_lookup_hits;
418 atomic_t chain_lookup_misses;
419 atomic_t hardirqs_on_events;
420 atomic_t hardirqs_off_events;
421 atomic_t redundant_hardirqs_on;
422 atomic_t redundant_hardirqs_off;
423 atomic_t softirqs_on_events;
424 atomic_t softirqs_off_events;
425 atomic_t redundant_softirqs_on;
426 atomic_t redundant_softirqs_off;
427 atomic_t nr_unused_locks;
428 atomic_t nr_cyclic_checks;
429 atomic_t nr_cyclic_check_recursions;
430 atomic_t nr_find_usage_forwards_checks;
431 atomic_t nr_find_usage_forwards_recursions;
432 atomic_t nr_find_usage_backwards_checks;
433 atomic_t nr_find_usage_backwards_recursions;
434 # define debug_atomic_inc(ptr)          atomic_inc(ptr)
435 # define debug_atomic_dec(ptr)          atomic_dec(ptr)
436 # define debug_atomic_read(ptr)         atomic_read(ptr)
437 #else
438 # define debug_atomic_inc(ptr)          do { } while (0)
439 # define debug_atomic_dec(ptr)          do { } while (0)
440 # define debug_atomic_read(ptr)         0
441 #endif
442
443 /*
444  * Locking printouts:
445  */
446
447 static const char *usage_str[] =
448 {
449         [LOCK_USED] =                   "initial-use ",
450         [LOCK_USED_IN_HARDIRQ] =        "in-hardirq-W",
451         [LOCK_USED_IN_SOFTIRQ] =        "in-softirq-W",
452         [LOCK_ENABLED_SOFTIRQS] =       "softirq-on-W",
453         [LOCK_ENABLED_HARDIRQS] =       "hardirq-on-W",
454         [LOCK_USED_IN_HARDIRQ_READ] =   "in-hardirq-R",
455         [LOCK_USED_IN_SOFTIRQ_READ] =   "in-softirq-R",
456         [LOCK_ENABLED_SOFTIRQS_READ] =  "softirq-on-R",
457         [LOCK_ENABLED_HARDIRQS_READ] =  "hardirq-on-R",
458 };
459
460 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
461 {
462         return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
463 }
464
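/*
 * Summarize a class's irq usage as four characters: hardirq-write,
 * softirq-write, hardirq-read, softirq-read. '+' means the lock was used
 * in that irq context, '-' means it was taken with that irq type enabled,
 * '?' means both (read cases only), '.' means neither.
 */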
465 void
466 get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
467 {
468         *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
469
470         if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
471                 *c1 = '+';
472         else
473                 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
474                         *c1 = '-';
475
476         if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
477                 *c2 = '+';
478         else
479                 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
480                         *c2 = '-';
481
482         if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
483                 *c3 = '-';
484         if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
485                 *c3 = '+';
486                 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
487                         *c3 = '?';
488         }
489
490         if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
491                 *c4 = '-';
492         if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
493                 *c4 = '+';
494                 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
495                         *c4 = '?';
496         }
497 }
498
499 static void print_lock_name(struct lock_class *class)
500 {
501         char str[KSYM_NAME_LEN], c1, c2, c3, c4;
502         const char *name;
503
504         get_usage_chars(class, &c1, &c2, &c3, &c4);
505
506         name = class->name;
507         if (!name) {
508                 name = __get_key_name(class->key, str);
509                 printk(" (%s", name);
510         } else {
511                 printk(" (%s", name);
512                 if (class->name_version > 1)
513                         printk("#%d", class->name_version);
514                 if (class->subclass)
515                         printk("/%d", class->subclass);
516         }
517         printk("){%c%c%c%c}", c1, c2, c3, c4);
518 }
519
520 static void print_lockdep_cache(struct lockdep_map *lock)
521 {
522         const char *name;
523         char str[KSYM_NAME_LEN];
524
525         name = lock->name;
526         if (!name)
527                 name = __get_key_name(lock->key->subkeys, str);
528
529         printk("%s", name);
530 }
531
532 static void print_lock(struct held_lock *hlock)
533 {
534         print_lock_name(hlock_class(hlock));
535         printk(", at: ");
536         print_ip_sym(hlock->acquire_ip);
537 }
538
539 static void lockdep_print_held_locks(struct task_struct *curr)
540 {
541         int i, depth = curr->lockdep_depth;
542
543         if (!depth) {
544                 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
545                 return;
546         }
547         printk("%d lock%s held by %s/%d:\n",
548                 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
549
550         for (i = 0; i < depth; i++) {
551                 printk(" #%d: ", i);
552                 print_lock(curr->held_locks + i);
553         }
554 }
555
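/*
 * Print one node of the dependency tree: the class name, its ops count,
 * each usage bit that is set together with the stack trace where it was
 * first set, and the address of the class key.
 */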
556 static void print_lock_class_header(struct lock_class *class, int depth)
557 {
558         int bit;
559
560         printk("%*s->", depth, "");
561         print_lock_name(class);
562         printk(" ops: %lu", class->ops);
563         printk(" {\n");
564
565         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
566                 if (class->usage_mask & (1 << bit)) {
567                         int len = depth;
568
569                         len += printk("%*s   %s", depth, "", usage_str[bit]);
570                         len += printk(" at:\n");
571                         print_stack_trace(class->usage_traces + bit, len);
572                 }
573         }
574         printk("%*s }\n", depth, "");
575
576         printk("%*s ... key      at: ",depth,"");
577         print_ip_sym((unsigned long)class->key);
578 }
579
580 /*
581  * printk all lock dependencies starting at <entry>:
582  */
583 static void __used
584 print_lock_dependencies(struct lock_class *class, int depth)
585 {
586         struct lock_list *entry;
587
588         if (lockdep_dependency_visit(class, depth))
589                 return;
590
591         if (DEBUG_LOCKS_WARN_ON(depth >= 20))
592                 return;
593
594         print_lock_class_header(class, depth);
595
596         list_for_each_entry(entry, &class->locks_after, entry) {
597                 if (DEBUG_LOCKS_WARN_ON(!entry->class))
598                         return;
599
600                 print_lock_dependencies(entry->class, depth + 1);
601
602                 printk("%*s ... acquired at:\n",depth,"");
603                 print_stack_trace(&entry->trace, 2);
604                 printk("\n");
605         }
606 }
607
608 static void print_kernel_version(void)
609 {
610         printk("%s %.*s\n", init_utsname()->release,
611                 (int)strcspn(init_utsname()->version, " "),
612                 init_utsname()->version);
613 }
614
615 static int very_verbose(struct lock_class *class)
616 {
617 #if VERY_VERBOSE
618         return class_filter(class);
619 #endif
620         return 0;
621 }
622
623 /*
624  * Is this the address of a static object:
625  */
626 static int static_obj(void *obj)
627 {
628         unsigned long start = (unsigned long) &_stext,
629                       end   = (unsigned long) &_end,
630                       addr  = (unsigned long) obj;
631 #ifdef CONFIG_SMP
632         int i;
633 #endif
634
635         /*
636          * static variable?
637          */
638         if ((addr >= start) && (addr < end))
639                 return 1;
640
641 #ifdef CONFIG_SMP
642         /*
643          * percpu var?
644          */
645         for_each_possible_cpu(i) {
646                 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
647                 end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
648                                         + per_cpu_offset(i);
649
650                 if ((addr >= start) && (addr < end))
651                         return 1;
652         }
653 #endif
654
655         /*
656          * module var?
657          */
658         return is_module_address(addr);
659 }
660
661 /*
662  * To make lock name printouts unique, we calculate a unique
663  * class->name_version generation counter:
664  */
665 static int count_matching_names(struct lock_class *new_class)
666 {
667         struct lock_class *class;
668         int count = 0;
669
670         if (!new_class->name)
671                 return 0;
672
673         list_for_each_entry(class, &all_lock_classes, lock_entry) {
674                 if (new_class->key - new_class->subclass == class->key)
675                         return class->name_version;
676                 if (class->name && !strcmp(class->name, new_class->name))
677                         count = max(count, class->name_version);
678         }
679
680         return count + 1;
681 }
682
683 /*
684  * Look up a lock's class in the hash-table; return NULL if it has not
685  * been registered yet. The result gets cached in the lock object itself,
686  * so the actual hash lookup should happen only once per lock object.
687  */
688 static inline struct lock_class *
689 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
690 {
691         struct lockdep_subclass_key *key;
692         struct list_head *hash_head;
693         struct lock_class *class;
694
695 #ifdef CONFIG_DEBUG_LOCKDEP
696         /*
697          * If the architecture calls into lockdep before initializing
698          * the hashes then we'll warn about it later. (we cannot printk
699          * right now)
700          */
701         if (unlikely(!lockdep_initialized)) {
702                 lockdep_init();
703                 lockdep_init_error = 1;
704                 save_stack_trace(&lockdep_init_trace);
705         }
706 #endif
707
708         /*
709          * Static locks do not have their class-keys yet - for them the key
710          * is the lock object itself:
711          */
712         if (unlikely(!lock->key))
713                 lock->key = (void *)lock;
714
715         /*
716          * NOTE: the class-key must be unique. For dynamic locks, a static
717          * lock_class_key variable is passed in through the mutex_init()
718          * (or spin_lock_init()) call - which acts as the key. For static
719          * locks we use the lock object itself as the key.
720          */
721         BUILD_BUG_ON(sizeof(struct lock_class_key) >
722                         sizeof(struct lockdep_map));
723
724         key = lock->key->subkeys + subclass;
725
726         hash_head = classhashentry(key);
727
728         /*
729          * We can walk the hash lock-free, because the hash only
730          * grows, and we are careful when adding entries to the end:
731          */
732         list_for_each_entry(class, hash_head, hash_entry) {
733                 if (class->key == key) {
734                         WARN_ON_ONCE(class->name != lock->name);
735                         return class;
736                 }
737         }
738
739         return NULL;
740 }
741
742 /*
743  * Register a lock's class in the hash-table, if the class is not present
744  * yet. Otherwise we look it up. We cache the result in the lock object
745  * itself, so the actual hash lookup should happen only once per lock object.
746  */
747 static inline struct lock_class *
748 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
749 {
750         struct lockdep_subclass_key *key;
751         struct list_head *hash_head;
752         struct lock_class *class;
753         unsigned long flags;
754
755         class = look_up_lock_class(lock, subclass);
756         if (likely(class))
757                 return class;
758
759         /*
760          * Debug-check: all keys must be persistent!
761          */
762         if (!static_obj(lock->key)) {
763                 debug_locks_off();
764                 printk("INFO: trying to register non-static key.\n");
765                 printk("the code is fine but needs lockdep annotation.\n");
766                 printk("turning off the locking correctness validator.\n");
767                 dump_stack();
768
769                 return NULL;
770         }
771
772         key = lock->key->subkeys + subclass;
773         hash_head = classhashentry(key);
774
775         raw_local_irq_save(flags);
776         if (!graph_lock()) {
777                 raw_local_irq_restore(flags);
778                 return NULL;
779         }
780         /*
781          * We have to do the hash-walk again, to avoid races
782          * with another CPU:
783          */
784         list_for_each_entry(class, hash_head, hash_entry)
785                 if (class->key == key)
786                         goto out_unlock_set;
787         /*
788          * Allocate a new key from the static array, and add it to
789          * the hash:
790          */
791         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
792                 if (!debug_locks_off_graph_unlock()) {
793                         raw_local_irq_restore(flags);
794                         return NULL;
795                 }
796                 raw_local_irq_restore(flags);
797
798                 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
799                 printk("turning off the locking correctness validator.\n");
800                 return NULL;
801         }
802         class = lock_classes + nr_lock_classes++;
803         debug_atomic_inc(&nr_unused_locks);
804         class->key = key;
805         class->name = lock->name;
806         class->subclass = subclass;
807         INIT_LIST_HEAD(&class->lock_entry);
808         INIT_LIST_HEAD(&class->locks_before);
809         INIT_LIST_HEAD(&class->locks_after);
810         class->name_version = count_matching_names(class);
811         /*
812          * We use RCU's safe list-add method to make
813          * parallel walking of the hash-list safe:
814          */
815         list_add_tail_rcu(&class->hash_entry, hash_head);
816         /*
817          * Add it to the global list of classes:
818          */
819         list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
820
821         if (verbose(class)) {
822                 graph_unlock();
823                 raw_local_irq_restore(flags);
824
825                 printk("\nnew class %p: %s", class->key, class->name);
826                 if (class->name_version > 1)
827                         printk("#%d", class->name_version);
828                 printk("\n");
829                 dump_stack();
830
831                 raw_local_irq_save(flags);
832                 if (!graph_lock()) {
833                         raw_local_irq_restore(flags);
834                         return NULL;
835                 }
836         }
837 out_unlock_set:
838         graph_unlock();
839         raw_local_irq_restore(flags);
840
841         if (!subclass || force)
842                 lock->class_cache = class;
843
844         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
845                 return NULL;
846
847         return class;
848 }
849
850 #ifdef CONFIG_PROVE_LOCKING
851 /*
852  * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
853  * NULL on failure.)
854  */
855 static struct lock_list *alloc_list_entry(void)
856 {
857         if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
858                 if (!debug_locks_off_graph_unlock())
859                         return NULL;
860
861                 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
862                 printk("turning off the locking correctness validator.\n");
863                 return NULL;
864         }
865         return list_entries + nr_list_entries++;
866 }
867
868 /*
869  * Add a new dependency to the tail of the list:
870  */
871 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
872                             struct list_head *head, unsigned long ip, int distance)
873 {
874         struct lock_list *entry;
875         /*
876          * Lock not present yet - get a new dependency struct and
877          * add it to the list:
878          */
879         entry = alloc_list_entry();
880         if (!entry)
881                 return 0;
882
883         if (!save_trace(&entry->trace))
884                 return 0;
885
886         entry->class = this;
887         entry->distance = distance;
888         /*
889          * Since we never remove from the dependency list, the list can
890          * be walked lockless by other CPUs; it's only allocation
891          * that must be protected by the spinlock. But this also means
892          * we must make new entries visible only once writes to the
893          * entry become visible - hence the RCU op:
894          */
895         list_add_tail_rcu(&entry->entry, head);
896
897         return 1;
898 }
899
900 /*
901  * Recursive, forwards-direction lock-dependency checking, used both
902  * for noncyclic checking and for hardirq-unsafe/softirq-unsafe
903  * checking.
904  *
905  * (to keep the stackframe of the recursive functions small we
906  *  use these global variables, and we also mark various helper
907  *  functions as noinline.)
908  */
909 static struct held_lock *check_source, *check_target;
910
911 /*
912  * Print a dependency chain entry (this is only done when a deadlock
913  * has been detected):
914  */
915 static noinline int
916 print_circular_bug_entry(struct lock_list *target, unsigned int depth)
917 {
918         if (debug_locks_silent)
919                 return 0;
920         printk("\n-> #%u", depth);
921         print_lock_name(target->class);
922         printk(":\n");
923         print_stack_trace(&target->trace, 6);
924
925         return 0;
926 }
927
928 /*
929  * When a circular dependency is detected, print the
930  * header first:
931  */
932 static noinline int
933 print_circular_bug_header(struct lock_list *entry, unsigned int depth)
934 {
935         struct task_struct *curr = current;
936
937         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
938                 return 0;
939
940         printk("\n=======================================================\n");
941         printk(  "[ INFO: possible circular locking dependency detected ]\n");
942         print_kernel_version();
943         printk(  "-------------------------------------------------------\n");
944         printk("%s/%d is trying to acquire lock:\n",
945                 curr->comm, task_pid_nr(curr));
946         print_lock(check_source);
947         printk("\nbut task is already holding lock:\n");
948         print_lock(check_target);
949         printk("\nwhich lock already depends on the new lock.\n\n");
950         printk("\nthe existing dependency chain (in reverse order) is:\n");
951
952         print_circular_bug_entry(entry, depth);
953
954         return 0;
955 }
956
957 static noinline int print_circular_bug_tail(void)
958 {
959         struct task_struct *curr = current;
960         struct lock_list this;
961
962         if (debug_locks_silent)
963                 return 0;
964
965         this.class = hlock_class(check_source);
966         if (!save_trace(&this.trace))
967                 return 0;
968
969         print_circular_bug_entry(&this, 0);
970
971         printk("\nother info that might help us debug this:\n\n");
972         lockdep_print_held_locks(curr);
973
974         printk("\nstack backtrace:\n");
975         dump_stack();
976
977         return 0;
978 }
979
980 #define RECURSION_LIMIT 40
981
982 static int noinline print_infinite_recursion_bug(void)
983 {
984         if (!debug_locks_off_graph_unlock())
985                 return 0;
986
987         WARN_ON(1);
988
989         return 0;
990 }
991
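/*
 * Count the classes reachable forwards from @class (including @class
 * itself); the dep_gen_id marking above ensures each class is counted
 * only once per walk. Caller must hold lockdep_lock.
 */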
992 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
993                                            unsigned int depth)
994 {
995         struct lock_list *entry;
996         unsigned long ret = 1;
997
998         if (lockdep_dependency_visit(class, depth))
999                 return 0;
1000
1001         /*
1002          * Recurse this class's dependency list:
1003          */
1004         list_for_each_entry(entry, &class->locks_after, entry)
1005                 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1006
1007         return ret;
1008 }
1009
1010 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1011 {
1012         unsigned long ret, flags;
1013
1014         local_irq_save(flags);
1015         __raw_spin_lock(&lockdep_lock);
1016         ret = __lockdep_count_forward_deps(class, 0);
1017         __raw_spin_unlock(&lockdep_lock);
1018         local_irq_restore(flags);
1019
1020         return ret;
1021 }
1022
1023 unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1024                                             unsigned int depth)
1025 {
1026         struct lock_list *entry;
1027         unsigned long ret = 1;
1028
1029         if (lockdep_dependency_visit(class, depth))
1030                 return 0;
1031         /*
1032          * Recurse this class's dependency list:
1033          */
1034         list_for_each_entry(entry, &class->locks_before, entry)
1035                 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1036
1037         return ret;
1038 }
1039
1040 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1041 {
1042         unsigned long ret, flags;
1043
1044         local_irq_save(flags);
1045         __raw_spin_lock(&lockdep_lock);
1046         ret = __lockdep_count_backward_deps(class, 0);
1047         __raw_spin_unlock(&lockdep_lock);
1048         local_irq_restore(flags);
1049
1050         return ret;
1051 }
1052
1053 /*
1054  * Prove that the dependency graph starting at <source> cannot
1055  * lead to <check_target>. Print an error and return 0 if it does.
1056  */
1057 static noinline int
1058 check_noncircular(struct lock_class *source, unsigned int depth)
1059 {
1060         struct lock_list *entry;
1061
1062         if (lockdep_dependency_visit(source, depth))
1063                 return 1;
1064
1065         debug_atomic_inc(&nr_cyclic_check_recursions);
1066         if (depth > max_recursion_depth)
1067                 max_recursion_depth = depth;
1068         if (depth >= RECURSION_LIMIT)
1069                 return print_infinite_recursion_bug();
1070         /*
1071          * Check this lock's dependency list:
1072          */
1073         list_for_each_entry(entry, &source->locks_after, entry) {
1074                 if (entry->class == hlock_class(check_target))
1075                         return print_circular_bug_header(entry, depth+1);
1076                 debug_atomic_inc(&nr_cyclic_checks);
1077                 if (!check_noncircular(entry->class, depth+1))
1078                         return print_circular_bug_entry(entry, depth+1);
1079         }
1080         return 1;
1081 }
1082
1083 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1084 /*
1085  * Forwards and backwards subgraph searching, for the purposes of
1086  * proving that two subgraphs can be connected by a new dependency
1087  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1088  */
1089 static enum lock_usage_bit find_usage_bit;
1090 static struct lock_class *forwards_match, *backwards_match;
1091
1092 /*
1093  * Find a node in the forwards-direction dependency sub-graph starting
1094  * at <source> that matches <find_usage_bit>.
1095  *
1096  * Return 2 if such a node exists in the subgraph, and put that node
1097  * into <forwards_match>.
1098  *
1099  * Return 1 otherwise and keep <forwards_match> unchanged.
1100  * Return 0 on error.
1101  */
1102 static noinline int
1103 find_usage_forwards(struct lock_class *source, unsigned int depth)
1104 {
1105         struct lock_list *entry;
1106         int ret;
1107
1108         if (lockdep_dependency_visit(source, depth))
1109                 return 1;
1110
1111         if (depth > max_recursion_depth)
1112                 max_recursion_depth = depth;
1113         if (depth >= RECURSION_LIMIT)
1114                 return print_infinite_recursion_bug();
1115
1116         debug_atomic_inc(&nr_find_usage_forwards_checks);
1117         if (source->usage_mask & (1 << find_usage_bit)) {
1118                 forwards_match = source;
1119                 return 2;
1120         }
1121
1122         /*
1123          * Check this lock's dependency list:
1124          */
1125         list_for_each_entry(entry, &source->locks_after, entry) {
1126                 debug_atomic_inc(&nr_find_usage_forwards_recursions);
1127                 ret = find_usage_forwards(entry->class, depth+1);
1128                 if (ret == 2 || ret == 0)
1129                         return ret;
1130         }
1131         return 1;
1132 }
1133
1134 /*
1135  * Find a node in the backwards-direction dependency sub-graph starting
1136  * at <source> that matches <find_usage_bit>.
1137  *
1138  * Return 2 if such a node exists in the subgraph, and put that node
1139  * into <backwards_match>.
1140  *
1141  * Return 1 otherwise and keep <backwards_match> unchanged.
1142  * Return 0 on error.
1143  */
1144 static noinline int
1145 find_usage_backwards(struct lock_class *source, unsigned int depth)
1146 {
1147         struct lock_list *entry;
1148         int ret;
1149
1150         if (lockdep_dependency_visit(source, depth))
1151                 return 1;
1152
1153         if (!__raw_spin_is_locked(&lockdep_lock))
1154                 return DEBUG_LOCKS_WARN_ON(1);
1155
1156         if (depth > max_recursion_depth)
1157                 max_recursion_depth = depth;
1158         if (depth >= RECURSION_LIMIT)
1159                 return print_infinite_recursion_bug();
1160
1161         debug_atomic_inc(&nr_find_usage_backwards_checks);
1162         if (source->usage_mask & (1 << find_usage_bit)) {
1163                 backwards_match = source;
1164                 return 2;
1165         }
1166
1167         if (!source && debug_locks_off_graph_unlock()) {
1168                 WARN_ON(1);
1169                 return 0;
1170         }
1171
1172         /*
1173          * Check this lock's dependency list:
1174          */
1175         list_for_each_entry(entry, &source->locks_before, entry) {
1176                 debug_atomic_inc(&nr_find_usage_backwards_recursions);
1177                 ret = find_usage_backwards(entry->class, depth+1);
1178                 if (ret == 2 || ret == 0)
1179                         return ret;
1180         }
1181         return 1;
1182 }
1183
1184 static int
1185 print_bad_irq_dependency(struct task_struct *curr,
1186                          struct held_lock *prev,
1187                          struct held_lock *next,
1188                          enum lock_usage_bit bit1,
1189                          enum lock_usage_bit bit2,
1190                          const char *irqclass)
1191 {
1192         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1193                 return 0;
1194
1195         printk("\n======================================================\n");
1196         printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1197                 irqclass, irqclass);
1198         print_kernel_version();
1199         printk(  "------------------------------------------------------\n");
1200         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1201                 curr->comm, task_pid_nr(curr),
1202                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1203                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1204                 curr->hardirqs_enabled,
1205                 curr->softirqs_enabled);
1206         print_lock(next);
1207
1208         printk("\nand this task is already holding:\n");
1209         print_lock(prev);
1210         printk("which would create a new lock dependency:\n");
1211         print_lock_name(hlock_class(prev));
1212         printk(" ->");
1213         print_lock_name(hlock_class(next));
1214         printk("\n");
1215
1216         printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1217                 irqclass);
1218         print_lock_name(backwards_match);
1219         printk("\n... which became %s-irq-safe at:\n", irqclass);
1220
1221         print_stack_trace(backwards_match->usage_traces + bit1, 1);
1222
1223         printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1224         print_lock_name(forwards_match);
1225         printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1226         printk("...");
1227
1228         print_stack_trace(forwards_match->usage_traces + bit2, 1);
1229
1230         printk("\nother info that might help us debug this:\n\n");
1231         lockdep_print_held_locks(curr);
1232
1233         printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1234         print_lock_dependencies(backwards_match, 0);
1235
1236         printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1237         print_lock_dependencies(forwards_match, 0);
1238
1239         printk("\nstack backtrace:\n");
1240         dump_stack();
1241
1242         return 0;
1243 }
1244
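/*
 * Search <prev>'s backwards subgraph for a lock with usage bit
 * <bit_backwards> and <next>'s forwards subgraph for one with
 * <bit_forwards>. If both are found, the new prev -> next dependency
 * would connect an irq-safe lock to an irq-unsafe one, so report it.
 */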
1245 static int
1246 check_usage(struct task_struct *curr, struct held_lock *prev,
1247             struct held_lock *next, enum lock_usage_bit bit_backwards,
1248             enum lock_usage_bit bit_forwards, const char *irqclass)
1249 {
1250         int ret;
1251
1252         find_usage_bit = bit_backwards;
1253         /* fills in <backwards_match> */
1254         ret = find_usage_backwards(hlock_class(prev), 0);
1255         if (!ret || ret == 1)
1256                 return ret;
1257
1258         find_usage_bit = bit_forwards;
1259         ret = find_usage_forwards(hlock_class(next), 0);
1260         if (!ret || ret == 1)
1261                 return ret;
1262         /* ret == 2 */
1263         return print_bad_irq_dependency(curr, prev, next,
1264                         bit_backwards, bit_forwards, irqclass);
1265 }
1266
1267 static int
1268 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1269                 struct held_lock *next)
1270 {
1271         /*
1272          * Prove that the new dependency does not connect a hardirq-safe
1273          * lock with a hardirq-unsafe lock - to achieve this we search
1274          * the backwards-subgraph starting at <prev>, and the
1275          * forwards-subgraph starting at <next>:
1276          */
1277         if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
1278                                         LOCK_ENABLED_HARDIRQS, "hard"))
1279                 return 0;
1280
1281         /*
1282          * Prove that the new dependency does not connect a hardirq-safe-read
1283          * lock with a hardirq-unsafe lock - to achieve this we search
1284          * the backwards-subgraph starting at <prev>, and the
1285          * forwards-subgraph starting at <next>:
1286          */
1287         if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
1288                                         LOCK_ENABLED_HARDIRQS, "hard-read"))
1289                 return 0;
1290
1291         /*
1292          * Prove that the new dependency does not connect a softirq-safe
1293          * lock with a softirq-unsafe lock - to achieve this we search
1294          * the backwards-subgraph starting at <prev>, and the
1295          * forwards-subgraph starting at <next>:
1296          */
1297         if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
1298                                         LOCK_ENABLED_SOFTIRQS, "soft"))
1299                 return 0;
1300         /*
1301          * Prove that the new dependency does not connect a softirq-safe-read
1302          * lock with a softirq-unsafe lock - to achieve this we search
1303          * the backwards-subgraph starting at <prev>, and the
1304          * forwards-subgraph starting at <next>:
1305          */
1306         if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
1307                                         LOCK_ENABLED_SOFTIRQS, "soft-read"))
1308                 return 0;
1309
1310         return 1;
1311 }
1312
1313 static void inc_chains(void)
1314 {
1315         if (current->hardirq_context)
1316                 nr_hardirq_chains++;
1317         else {
1318                 if (current->softirq_context)
1319                         nr_softirq_chains++;
1320                 else
1321                         nr_process_chains++;
1322         }
1323 }
1324
1325 #else
1326
1327 static inline int
1328 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1329                 struct held_lock *next)
1330 {
1331         return 1;
1332 }
1333
1334 static inline void inc_chains(void)
1335 {
1336         nr_process_chains++;
1337 }
1338
1339 #endif
1340
1341 static int
1342 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1343                    struct held_lock *next)
1344 {
1345         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1346                 return 0;
1347
1348         printk("\n=============================================\n");
1349         printk(  "[ INFO: possible recursive locking detected ]\n");
1350         print_kernel_version();
1351         printk(  "---------------------------------------------\n");
1352         printk("%s/%d is trying to acquire lock:\n",
1353                 curr->comm, task_pid_nr(curr));
1354         print_lock(next);
1355         printk("\nbut task is already holding lock:\n");
1356         print_lock(prev);
1357
1358         printk("\nother info that might help us debug this:\n");
1359         lockdep_print_held_locks(curr);
1360
1361         printk("\nstack backtrace:\n");
1362         dump_stack();
1363
1364         return 0;
1365 }
1366
1367 /*
1368  * Check whether we are holding such a class already.
1369  *
1370  * (Note that this has to be done separately, because the graph cannot
1371  * detect such classes of deadlocks.)
1372  *
1373  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1374  */
1375 static int
1376 check_deadlock(struct task_struct *curr, struct held_lock *next,
1377                struct lockdep_map *next_instance, int read)
1378 {
1379         struct held_lock *prev;
1380         struct held_lock *nest = NULL;
1381         int i;
1382
1383         for (i = 0; i < curr->lockdep_depth; i++) {
1384                 prev = curr->held_locks + i;
1385
1386                 if (prev->instance == next->nest_lock)
1387                         nest = prev;
1388
1389                 if (hlock_class(prev) != hlock_class(next))
1390                         continue;
1391
1392                 /*
1393                  * Allow read-after-read recursion of the same
1394                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
1395                  */
1396                 if ((read == 2) && prev->read)
1397                         return 2;
1398
1399                 /*
1400                  * We're holding the nest_lock, which serializes this lock's
1401                  * nesting behaviour.
1402                  */
1403                 if (nest)
1404                         return 2;
1405
1406                 return print_deadlock_bug(curr, prev, next);
1407         }
1408         return 1;
1409 }
1410
1411 /*
1412  * There was a chain-cache miss, and we are about to add a new dependency
1413  * to a previous lock. We recursively validate the following rules:
1414  *
1415  *  - would the adding of the <prev> -> <next> dependency create a
1416  *    circular dependency in the graph? [== circular deadlock]
1417  *
1418  *  - does the new prev->next dependency connect any hardirq-safe lock
1419  *    (in the full backwards-subgraph starting at <prev>) with any
1420  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1421  *    <next>)? [== illegal lock inversion with hardirq contexts]
1422  *
1423  *  - does the new prev->next dependency connect any softirq-safe lock
1424  *    (in the full backwards-subgraph starting at <prev>) with any
1425  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1426  *    <next>)? [== illegal lock inversion with softirq contexts]
1427  *
1428  * any of these scenarios could lead to a deadlock.
1429  *
1430  * Then if all the validations pass, we add the forwards and backwards
1431  * dependency.
1432  */
1433 static int
1434 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1435                struct held_lock *next, int distance)
1436 {
1437         struct lock_list *entry;
1438         int ret;
1439
1440         /*
1441          * Prove that the new <prev> -> <next> dependency would not
1442          * create a circular dependency in the graph. (We do this by
1443          * forward-recursing into the graph starting at <next>, and
1444          * checking whether we can reach <prev>.)
1445          *
1446          * We are using global variables to control the recursion, to
1447          * keep the stackframe size of the recursive functions low:
1448          */
1449         check_source = next;
1450         check_target = prev;
1451         if (!(check_noncircular(hlock_class(next), 0)))
1452                 return print_circular_bug_tail();
1453
1454         if (!check_prev_add_irq(curr, prev, next))
1455                 return 0;
1456
1457         /*
1458          * For recursive read-locks we do all the dependency checks,
1459          * but we don't store read-triggered dependencies (only
1460          * write-triggered dependencies). This ensures that only the
1461          * write-side dependencies matter, and that if for example a
1462          * write-lock never takes any other locks, then the reads are
1463          * equivalent to a NOP.
1464          */
1465         if (next->read == 2 || prev->read == 2)
1466                 return 1;
1467         /*
1468          * Is the <prev> -> <next> dependency already present?
1469          *
1470          * (this may occur even though this is a new chain: consider
1471          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1472          *  chains - the second one will be new, but L1 already has
1473          *  L2 added to its dependency list, due to the first chain.)
1474          */
1475         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1476                 if (entry->class == hlock_class(next)) {
1477                         if (distance == 1)
1478                                 entry->distance = 1;
1479                         return 2;
1480                 }
1481         }
1482
1483         /*
1484          * Ok, all validations passed, add the new lock
1485          * to the previous lock's dependency list:
1486          */
1487         ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1488                                &hlock_class(prev)->locks_after,
1489                                next->acquire_ip, distance);
1490
1491         if (!ret)
1492                 return 0;
1493
1494         ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1495                                &hlock_class(next)->locks_before,
1496                                next->acquire_ip, distance);
1497         if (!ret)
1498                 return 0;
1499
1500         /*
1501          * Debugging printouts:
1502          */
1503         if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1504                 graph_unlock();
1505                 printk("\n new dependency: ");
1506                 print_lock_name(hlock_class(prev));
1507                 printk(" => ");
1508                 print_lock_name(hlock_class(next));
1509                 printk("\n");
1510                 dump_stack();
1511                 return graph_lock();
1512         }
1513         return 1;
1514 }
1515
1516 /*
1517  * Add the dependency to all directly-previous locks that are 'relevant'.
1518  * The ones that are relevant are (in increasing distance from curr):
1519  * all consecutive trylock entries and the final non-trylock entry - or
1520  * the end of this context's lock-chain - whichever comes first.
1521  */
1522 static int
1523 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1524 {
1525         int depth = curr->lockdep_depth;
1526         struct held_lock *hlock;
1527
1528         /*
1529          * Debugging checks.
1530          *
1531          * Depth must not be zero for a non-head lock:
1532          */
1533         if (!depth)
1534                 goto out_bug;
1535         /*
1536          * At least two relevant locks must exist for this
1537          * to be a head:
1538          */
1539         if (curr->held_locks[depth].irq_context !=
1540                         curr->held_locks[depth-1].irq_context)
1541                 goto out_bug;
1542
1543         for (;;) {
1544                 int distance = curr->lockdep_depth - depth + 1;
1545                 hlock = curr->held_locks + depth-1;
1546                 /*
1547                  * Only non-recursive-read entries get new dependencies
1548                  * added:
1549                  */
1550                 if (hlock->read != 2) {
1551                         if (!check_prev_add(curr, hlock, next, distance))
1552                                 return 0;
1553                         /*
1554                          * Stop after the first non-trylock entry,
1555                          * as non-trylock entries have added their
1556                          * own direct dependencies already, so this
1557                          * lock is connected to them indirectly:
1558                          */
1559                         if (!hlock->trylock)
1560                                 break;
1561                 }
1562                 depth--;
1563                 /*
1564                  * End of lock-stack?
1565                  */
1566                 if (!depth)
1567                         break;
1568                 /*
1569                  * Stop the search if we cross into another context:
1570                  */
1571                 if (curr->held_locks[depth].irq_context !=
1572                                 curr->held_locks[depth-1].irq_context)
1573                         break;
1574         }
1575         return 1;
1576 out_bug:
1577         if (!debug_locks_off_graph_unlock())
1578                 return 0;
1579
1580         WARN_ON(1);
1581
1582         return 0;
1583 }
1584
1585 unsigned long nr_lock_chains;
1586 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1587 int nr_chain_hlocks;
1588 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1589
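/*
 * chain_hlocks[] stores, for every cached chain, the lock_classes[]
 * indices of the held locks making up that chain; chain->base and
 * chain->depth delimit the chain's slice of the array.
 */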
1590 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1591 {
1592         return lock_classes + chain_hlocks[chain->base + i];
1593 }
1594
1595 /*
1596  * Look up a dependency chain. If the key is not present yet then
1597  * add it and return 1 - in this case the new dependency chain is
1598  * validated. If the key is already hashed, return 0.
1599  * (On return with 1 graph_lock is held.)
1600  */
1601 static inline int lookup_chain_cache(struct task_struct *curr,
1602                                      struct held_lock *hlock,
1603                                      u64 chain_key)
1604 {
1605         struct lock_class *class = hlock_class(hlock);
1606         struct list_head *hash_head = chainhashentry(chain_key);
1607         struct lock_chain *chain;
1608         struct held_lock *hlock_curr, *hlock_next;
1609         int i, j, n, cn;
1610
1611         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1612                 return 0;
1613         /*
1614          * We can walk it lock-free, because entries only get added
1615          * to the hash:
1616          */
1617         list_for_each_entry(chain, hash_head, entry) {
1618                 if (chain->chain_key == chain_key) {
1619 cache_hit:
1620                         debug_atomic_inc(&chain_lookup_hits);
1621                         if (very_verbose(class))
1622                                 printk("\nhash chain already cached, key: "
1623                                         "%016Lx tail class: [%p] %s\n",
1624                                         (unsigned long long)chain_key,
1625                                         class->key, class->name);
1626                         return 0;
1627                 }
1628         }
1629         if (very_verbose(class))
1630                 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1631                         (unsigned long long)chain_key, class->key, class->name);
1632         /*
1633          * Allocate a new chain entry from the static array, and add
1634          * it to the hash:
1635          */
1636         if (!graph_lock())
1637                 return 0;
1638         /*
1639          * We have to walk the chain again locked - to avoid duplicates:
1640          */
1641         list_for_each_entry(chain, hash_head, entry) {
1642                 if (chain->chain_key == chain_key) {
1643                         graph_unlock();
1644                         goto cache_hit;
1645                 }
1646         }
1647         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1648                 if (!debug_locks_off_graph_unlock())
1649                         return 0;
1650
1651                 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1652                 printk("turning off the locking correctness validator.\n");
1653                 return 0;
1654         }
1655         chain = lock_chains + nr_lock_chains++;
1656         chain->chain_key = chain_key;
1657         chain->irq_context = hlock->irq_context;
1658         /* Find the first held_lock of current chain */
1659         hlock_next = hlock;
1660         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1661                 hlock_curr = curr->held_locks + i;
1662                 if (hlock_curr->irq_context != hlock_next->irq_context)
1663                         break;
1664                 hlock_next = hlock;
1665         }
1666         i++;
1667         chain->depth = curr->lockdep_depth + 1 - i;
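        /*
         * Reserve chain->depth slots in chain_hlocks[] without taking any
         * further locks: retry the cmpxchg() on nr_chain_hlocks until our
         * update wins or the array would overflow.
         */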
1668         cn = nr_chain_hlocks;
1669         while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1670                 n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1671                 if (n == cn)
1672                         break;
1673                 cn = n;
1674         }
1675         if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1676                 chain->base = cn;
1677                 for (j = 0; j < chain->depth - 1; j++, i++) {
1678                         int lock_id = curr->held_locks[i].class_idx - 1;
1679                         chain_hlocks[chain->base + j] = lock_id;
1680                 }
1681                 chain_hlocks[chain->base + j] = class - lock_classes;
1682         }
1683         list_add_tail_rcu(&chain->entry, hash_head);
1684         debug_atomic_inc(&chain_lookup_misses);
1685         inc_chains();
1686
1687         return 1;
1688 }
1689
1690 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1691                 struct held_lock *hlock, int chain_head, u64 chain_key)
1692 {
1693         /*
1694          * Trylock needs to maintain the stack of held locks, but it
1695          * does not add new dependencies, because trylock can be done
1696          * in any order.
1697          *
1698          * We look up the chain_key and do the O(N^2) check and update of
1699          * the dependencies only if this is a new dependency chain.
1700          * (If lookup_chain_cache() returns with 1 it acquires
1701          * graph_lock for us)
1702          */
1703         if (!hlock->trylock && (hlock->check == 2) &&
1704             lookup_chain_cache(curr, hlock, chain_key)) {
1705                 /*
1706                  * Check whether last held lock:
1707                  *
1708                  * - is irq-safe, if this lock is irq-unsafe
1709                  * - is softirq-safe, if this lock is hardirq-unsafe
1710                  *
1711                  * And check whether the new lock's dependency graph
1712                  * could lead back to the previous lock.
1713                  *
1714                  * Any of these scenarios could lead to a deadlock. If
1715                  * all validations pass, the dependencies get added below.
1716                  */
1717                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
1718
1719                 if (!ret)
1720                         return 0;
1721                 /*
1722                  * Mark recursive read, as we jump over it when
1723                  * building dependencies (just like we jump over
1724                  * trylock entries):
1725                  */
1726                 if (ret == 2)
1727                         hlock->read = 2;
1728                 /*
1729                  * Add dependency only if this lock is not the head
1730                  * of the chain, and if it's not a secondary read-lock:
1731                  */
1732                 if (!chain_head && ret != 2)
1733                         if (!check_prevs_add(curr, hlock))
1734                                 return 0;
1735                 graph_unlock();
1736         } else
1737                 /* after lookup_chain_cache(): */
1738                 if (unlikely(!debug_locks))
1739                         return 0;
1740
1741         return 1;
1742 }
1743 #else
1744 static inline int validate_chain(struct task_struct *curr,
1745                 struct lockdep_map *lock, struct held_lock *hlock,
1746                 int chain_head, u64 chain_key)
1747 {
1748         return 1;
1749 }
1750 #endif
1751
1752 /*
1753  * We are building curr_chain_key incrementally, so double-check
1754  * it from scratch, to make sure that it's done correctly:
1755  */
1756 static void check_chain_key(struct task_struct *curr)
1757 {
1758 #ifdef CONFIG_DEBUG_LOCKDEP
1759         struct held_lock *hlock, *prev_hlock = NULL;
1760         unsigned int i, id;
1761         u64 chain_key = 0;
1762
1763         for (i = 0; i < curr->lockdep_depth; i++) {
1764                 hlock = curr->held_locks + i;
1765                 if (chain_key != hlock->prev_chain_key) {
1766                         debug_locks_off();
1767                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1768                                 curr->lockdep_depth, i,
1769                                 (unsigned long long)chain_key,
1770                                 (unsigned long long)hlock->prev_chain_key);
1771                         return;
1772                 }
1773                 id = hlock->class_idx - 1;
1774                 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1775                         return;
1776
1777                 if (prev_hlock && (prev_hlock->irq_context !=
1778                                                         hlock->irq_context))
1779                         chain_key = 0;
1780                 chain_key = iterate_chain_key(chain_key, id);
1781                 prev_hlock = hlock;
1782         }
1783         if (chain_key != curr->curr_chain_key) {
1784                 debug_locks_off();
1785                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1786                         curr->lockdep_depth, i,
1787                         (unsigned long long)chain_key,
1788                         (unsigned long long)curr->curr_chain_key);
1789         }
1790 #endif
1791 }
1792
1793 static int
1794 print_usage_bug(struct task_struct *curr, struct held_lock *this,
1795                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1796 {
1797         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1798                 return 0;
1799
1800         printk("\n=================================\n");
1801         printk(  "[ INFO: inconsistent lock state ]\n");
1802         print_kernel_version();
1803         printk(  "---------------------------------\n");
1804
1805         printk("inconsistent {%s} -> {%s} usage.\n",
1806                 usage_str[prev_bit], usage_str[new_bit]);
1807
1808         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1809                 curr->comm, task_pid_nr(curr),
1810                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1811                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1812                 trace_hardirqs_enabled(curr),
1813                 trace_softirqs_enabled(curr));
1814         print_lock(this);
1815
1816         printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1817         print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
1818
1819         print_irqtrace_events(curr);
1820         printk("\nother info that might help us debug this:\n");
1821         lockdep_print_held_locks(curr);
1822
1823         printk("\nstack backtrace:\n");
1824         dump_stack();
1825
1826         return 0;
1827 }
1828
1829 /*
1830  * Print out an error if an invalid bit is set:
1831  */
1832 static inline int
1833 valid_state(struct task_struct *curr, struct held_lock *this,
1834             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1835 {
1836         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
1837                 return print_usage_bug(curr, this, bad_bit, new_bit);
1838         return 1;
1839 }
1840
1841 static int mark_lock(struct task_struct *curr, struct held_lock *this,
1842                      enum lock_usage_bit new_bit);
1843
1844 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1845
1846 /*
1847  * print irq inversion bug:
1848  */
1849 static int
1850 print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1851                         struct held_lock *this, int forwards,
1852                         const char *irqclass)
1853 {
1854         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1855                 return 0;
1856
1857         printk("\n=========================================================\n");
1858         printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1859         print_kernel_version();
1860         printk(  "---------------------------------------------------------\n");
1861         printk("%s/%d just changed the state of lock:\n",
1862                 curr->comm, task_pid_nr(curr));
1863         print_lock(this);
1864         if (forwards)
1865                 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1866         else
1867                 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1868         print_lock_name(other);
1869         printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1870
1871         printk("\nother info that might help us debug this:\n");
1872         lockdep_print_held_locks(curr);
1873
1874         printk("\nthe first lock's dependencies:\n");
1875         print_lock_dependencies(hlock_class(this), 0);
1876
1877         printk("\nthe second lock's dependencies:\n");
1878         print_lock_dependencies(other, 0);
1879
1880         printk("\nstack backtrace:\n");
1881         dump_stack();
1882
1883         return 0;
1884 }
1885
1886 /*
1887  * Prove that in the forwards-direction subgraph starting at <this>
1888  * there is no lock matching <mask>:
1889  */
1890 static int
1891 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1892                      enum lock_usage_bit bit, const char *irqclass)
1893 {
1894         int ret;
1895
1896         find_usage_bit = bit;
1897         /* fills in <forwards_match> */
1898         ret = find_usage_forwards(hlock_class(this), 0);
1899         if (!ret || ret == 1)
1900                 return ret;
1901
1902         return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1903 }
1904
1905 /*
1906  * Prove that in the backwards-direction subgraph starting at <this>
1907  * there is no lock matching <mask>:
1908  */
1909 static int
1910 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1911                       enum lock_usage_bit bit, const char *irqclass)
1912 {
1913         int ret;
1914
1915         find_usage_bit = bit;
1916         /* fills in <backwards_match> */
1917         ret = find_usage_backwards(hlock_class(this), 0);
1918         if (!ret || ret == 1)
1919                 return ret;
1920
1921         return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1922 }
1923
1924 void print_irqtrace_events(struct task_struct *curr)
1925 {
1926         printk("irq event stamp: %u\n", curr->irq_events);
1927         printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1928         print_ip_sym(curr->hardirq_enable_ip);
1929         printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1930         print_ip_sym(curr->hardirq_disable_ip);
1931         printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1932         print_ip_sym(curr->softirq_enable_ip);
1933         printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1934         print_ip_sym(curr->softirq_disable_ip);
1935 }
1936
1937 static int hardirq_verbose(struct lock_class *class)
1938 {
1939 #if HARDIRQ_VERBOSE
1940         return class_filter(class);
1941 #endif
1942         return 0;
1943 }
1944
1945 static int softirq_verbose(struct lock_class *class)
1946 {
1947 #if SOFTIRQ_VERBOSE
1948         return class_filter(class);
1949 #endif
1950         return 0;
1951 }
1952
1953 #define STRICT_READ_CHECKS      1
1954
1955 static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1956                 enum lock_usage_bit new_bit)
1957 {
1958         int ret = 1;
1959
1960         switch(new_bit) {
1961         case LOCK_USED_IN_HARDIRQ:
1962                 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1963                         return 0;
1964                 if (!valid_state(curr, this, new_bit,
1965                                  LOCK_ENABLED_HARDIRQS_READ))
1966                         return 0;
1967                 /*
1968                  * just marked it hardirq-safe, check that this lock
1969                  * took no hardirq-unsafe lock in the past:
1970                  */
1971                 if (!check_usage_forwards(curr, this,
1972                                           LOCK_ENABLED_HARDIRQS, "hard"))
1973                         return 0;
1974 #if STRICT_READ_CHECKS
1975                 /*
1976                  * just marked it hardirq-safe, check that this lock
1977                  * took no hardirq-unsafe-read lock in the past:
1978                  */
1979                 if (!check_usage_forwards(curr, this,
1980                                 LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1981                         return 0;
1982 #endif
1983                 if (hardirq_verbose(hlock_class(this)))
1984                         ret = 2;
1985                 break;
1986         case LOCK_USED_IN_SOFTIRQ:
1987                 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1988                         return 0;
1989                 if (!valid_state(curr, this, new_bit,
1990                                  LOCK_ENABLED_SOFTIRQS_READ))
1991                         return 0;
1992                 /*
1993                  * just marked it softirq-safe, check that this lock
1994                  * took no softirq-unsafe lock in the past:
1995                  */
1996                 if (!check_usage_forwards(curr, this,
1997                                           LOCK_ENABLED_SOFTIRQS, "soft"))
1998                         return 0;
1999 #if STRICT_READ_CHECKS
2000                 /*
2001                  * just marked it softirq-safe, check that this lock
2002                  * took no softirq-unsafe-read lock in the past:
2003                  */
2004                 if (!check_usage_forwards(curr, this,
2005                                 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
2006                         return 0;
2007 #endif
2008                 if (softirq_verbose(hlock_class(this)))
2009                         ret = 2;
2010                 break;
2011         case LOCK_USED_IN_HARDIRQ_READ:
2012                 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
2013                         return 0;
2014                 /*
2015                  * just marked it hardirq-read-safe, check that this lock
2016                  * took no hardirq-unsafe lock in the past:
2017                  */
2018                 if (!check_usage_forwards(curr, this,
2019                                           LOCK_ENABLED_HARDIRQS, "hard"))
2020                         return 0;
2021                 if (hardirq_verbose(hlock_class(this)))
2022                         ret = 2;
2023                 break;
2024         case LOCK_USED_IN_SOFTIRQ_READ:
2025                 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
2026                         return 0;
2027                 /*
2028                  * just marked it softirq-read-safe, check that this lock
2029                  * took no softirq-unsafe lock in the past:
2030                  */
2031                 if (!check_usage_forwards(curr, this,
2032                                           LOCK_ENABLED_SOFTIRQS, "soft"))
2033                         return 0;
2034                 if (softirq_verbose(hlock_class(this)))
2035                         ret = 2;
2036                 break;
2037         case LOCK_ENABLED_HARDIRQS:
2038                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2039                         return 0;
2040                 if (!valid_state(curr, this, new_bit,
2041                                  LOCK_USED_IN_HARDIRQ_READ))
2042                         return 0;
2043                 /*
2044                  * just marked it hardirq-unsafe, check that no hardirq-safe
2045                  * lock in the system ever took it in the past:
2046                  */
2047                 if (!check_usage_backwards(curr, this,
2048                                            LOCK_USED_IN_HARDIRQ, "hard"))
2049                         return 0;
2050 #if STRICT_READ_CHECKS
2051                 /*
2052                  * just marked it hardirq-unsafe, check that no
2053                  * hardirq-safe-read lock in the system ever took
2054                  * it in the past:
2055                  */
2056                 if (!check_usage_backwards(curr, this,
2057                                    LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
2058                         return 0;
2059 #endif
2060                 if (hardirq_verbose(hlock_class(this)))
2061                         ret = 2;
2062                 break;
2063         case LOCK_ENABLED_SOFTIRQS:
2064                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2065                         return 0;
2066                 if (!valid_state(curr, this, new_bit,
2067                                  LOCK_USED_IN_SOFTIRQ_READ))
2068                         return 0;
2069                 /*
2070                  * just marked it softirq-unsafe, check that no softirq-safe
2071                  * lock in the system ever took it in the past:
2072                  */
2073                 if (!check_usage_backwards(curr, this,
2074                                            LOCK_USED_IN_SOFTIRQ, "soft"))
2075                         return 0;
2076 #if STRICT_READ_CHECKS
2077                 /*
2078                  * just marked it softirq-unsafe, check that no
2079                  * softirq-safe-read lock in the system ever took
2080                  * it in the past:
2081                  */
2082                 if (!check_usage_backwards(curr, this,
2083                                    LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
2084                         return 0;
2085 #endif
2086                 if (softirq_verbose(hlock_class(this)))
2087                         ret = 2;
2088                 break;
2089         case LOCK_ENABLED_HARDIRQS_READ:
2090                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2091                         return 0;
2092 #if STRICT_READ_CHECKS
2093                 /*
2094                  * just marked it hardirq-read-unsafe, check that no
2095                  * hardirq-safe lock in the system ever took it in the past:
2096                  */
2097                 if (!check_usage_backwards(curr, this,
2098                                            LOCK_USED_IN_HARDIRQ, "hard"))
2099                         return 0;
2100 #endif
2101                 if (hardirq_verbose(hlock_class(this)))
2102                         ret = 2;
2103                 break;
2104         case LOCK_ENABLED_SOFTIRQS_READ:
2105                 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2106                         return 0;
2107 #if STRICT_READ_CHECKS
2108                 /*
2109                  * just marked it softirq-read-unsafe, check that no
2110                  * softirq-safe lock in the system ever took it in the past:
2111                  */
2112                 if (!check_usage_backwards(curr, this,
2113                                            LOCK_USED_IN_SOFTIRQ, "soft"))
2114                         return 0;
2115 #endif
2116                 if (softirq_verbose(hlock_class(this)))
2117                         ret = 2;
2118                 break;
2119         default:
2120                 WARN_ON(1);
2121                 break;
2122         }
2123
2124         return ret;
2125 }
2126
2127 /*
2128  * Mark all held locks with a usage bit:
2129  */
2130 static int
2131 mark_held_locks(struct task_struct *curr, int hardirq)
2132 {
2133         enum lock_usage_bit usage_bit;
2134         struct held_lock *hlock;
2135         int i;
2136
2137         for (i = 0; i < curr->lockdep_depth; i++) {
2138                 hlock = curr->held_locks + i;
2139
2140                 if (hardirq) {
2141                         if (hlock->read)
2142                                 usage_bit = LOCK_ENABLED_HARDIRQS_READ;
2143                         else
2144                                 usage_bit = LOCK_ENABLED_HARDIRQS;
2145                 } else {
2146                         if (hlock->read)
2147                                 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
2148                         else
2149                                 usage_bit = LOCK_ENABLED_SOFTIRQS;
2150                 }
2151                 if (!mark_lock(curr, hlock, usage_bit))
2152                         return 0;
2153         }
2154
2155         return 1;
2156 }
2157
2158 /*
2159  * Debugging helper: via this flag we know that we are in
2160  * 'early bootup code', and will warn about any invalid irqs-on event:
2161  */
2162 static int early_boot_irqs_enabled;
2163
2164 void early_boot_irqs_off(void)
2165 {
2166         early_boot_irqs_enabled = 0;
2167 }
2168
2169 void early_boot_irqs_on(void)
2170 {
2171         early_boot_irqs_enabled = 1;
2172 }
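
/*
 * The early boot code (typically start_kernel()) is expected to bracket
 * its irqs-off window with early_boot_irqs_off()/early_boot_irqs_on(),
 * so that a stray irqs-on event during early boot is caught by the
 * check in trace_hardirqs_on_caller().
 */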
2173
2174 /*
2175  * Hardirqs will be enabled:
2176  */
2177 void trace_hardirqs_on_caller(unsigned long ip)
2178 {
2179         struct task_struct *curr = current;
2180
2181         time_hardirqs_on(CALLER_ADDR0, ip);
2182
2183         if (unlikely(!debug_locks || current->lockdep_recursion))
2184                 return;
2185
2186         if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2187                 return;
2188
2189         if (unlikely(curr->hardirqs_enabled)) {
2190                 debug_atomic_inc(&redundant_hardirqs_on);
2191                 return;
2192         }
2193         /* we'll do an OFF -> ON transition: */
2194         curr->hardirqs_enabled = 1;
2195
2196         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2197                 return;
2198         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2199                 return;
2200         /*
2201          * We are going to turn hardirqs on, so set the
2202          * usage bit for all held locks:
2203          */
2204         if (!mark_held_locks(curr, 1))
2205                 return;
2206         /*
2207          * If we have softirqs enabled, then set the usage
2208          * bit for all held locks. (disabled hardirqs prevented
2209          * this bit from being set before)
2210          */
2211         if (curr->softirqs_enabled)
2212                 if (!mark_held_locks(curr, 0))
2213                         return;
2214
2215         curr->hardirq_enable_ip = ip;
2216         curr->hardirq_enable_event = ++curr->irq_events;
2217         debug_atomic_inc(&hardirqs_on_events);
2218 }
2219 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2220
2221 void trace_hardirqs_on(void)
2222 {
2223         trace_hardirqs_on_caller(CALLER_ADDR0);
2224 }
2225 EXPORT_SYMBOL(trace_hardirqs_on);
2226
2227 /*
2228  * Hardirqs were disabled:
2229  */
2230 void trace_hardirqs_off_caller(unsigned long ip)
2231 {
2232         struct task_struct *curr = current;
2233
2234         time_hardirqs_off(CALLER_ADDR0, ip);
2235
2236         if (unlikely(!debug_locks || current->lockdep_recursion))
2237                 return;
2238
2239         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2240                 return;
2241
2242         if (curr->hardirqs_enabled) {
2243                 /*
2244                  * We have done an ON -> OFF transition:
2245                  */
2246                 curr->hardirqs_enabled = 0;
2247                 curr->hardirq_disable_ip = ip;
2248                 curr->hardirq_disable_event = ++curr->irq_events;
2249                 debug_atomic_inc(&hardirqs_off_events);
2250         } else
2251                 debug_atomic_inc(&redundant_hardirqs_off);
2252 }
2253 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2254
2255 void trace_hardirqs_off(void)
2256 {
2257         trace_hardirqs_off_caller(CALLER_ADDR0);
2258 }
2259 EXPORT_SYMBOL(trace_hardirqs_off);
2260
2261 /*
2262  * Softirqs will be enabled:
2263  */
2264 void trace_softirqs_on(unsigned long ip)
2265 {
2266         struct task_struct *curr = current;
2267
2268         if (unlikely(!debug_locks))
2269                 return;
2270
2271         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2272                 return;
2273
2274         if (curr->softirqs_enabled) {
2275                 debug_atomic_inc(&redundant_softirqs_on);
2276                 return;
2277         }
2278
2279         /*
2280          * We'll do an OFF -> ON transition:
2281          */
2282         curr->softirqs_enabled = 1;
2283         curr->softirq_enable_ip = ip;
2284         curr->softirq_enable_event = ++curr->irq_events;
2285         debug_atomic_inc(&softirqs_on_events);
2286         /*
2287          * We are going to turn softirqs on, so set the
2288          * usage bit for all held locks, if hardirqs are
2289          * enabled too:
2290          */
2291         if (curr->hardirqs_enabled)
2292                 mark_held_locks(curr, 0);
2293 }
2294
2295 /*
2296  * Softirqs were disabled:
2297  */
2298 void trace_softirqs_off(unsigned long ip)
2299 {
2300         struct task_struct *curr = current;
2301
2302         if (unlikely(!debug_locks))
2303                 return;
2304
2305         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2306                 return;
2307
2308         if (curr->softirqs_enabled) {
2309                 /*
2310                  * We have done an ON -> OFF transition:
2311                  */
2312                 curr->softirqs_enabled = 0;
2313                 curr->softirq_disable_ip = ip;
2314                 curr->softirq_disable_event = ++curr->irq_events;
2315                 debug_atomic_inc(&softirqs_off_events);
2316                 DEBUG_LOCKS_WARN_ON(!softirq_count());
2317         } else
2318                 debug_atomic_inc(&redundant_softirqs_off);
2319 }
2320
2321 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2322 {
2323         /*
2324          * If this is a non-trylock acquisition in a hardirq or softirq
2325          * context, mark the lock as used in that context:
2326          */
2327         if (!hlock->trylock) {
2328                 if (hlock->read) {
2329                         if (curr->hardirq_context)
2330                                 if (!mark_lock(curr, hlock,
2331                                                 LOCK_USED_IN_HARDIRQ_READ))
2332                                         return 0;
2333                         if (curr->softirq_context)
2334                                 if (!mark_lock(curr, hlock,
2335                                                 LOCK_USED_IN_SOFTIRQ_READ))
2336                                         return 0;
2337                 } else {
2338                         if (curr->hardirq_context)
2339                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2340                                         return 0;
2341                         if (curr->softirq_context)
2342                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2343                                         return 0;
2344                 }
2345         }
2346         if (!hlock->hardirqs_off) {
2347                 if (hlock->read) {
2348                         if (!mark_lock(curr, hlock,
2349                                         LOCK_ENABLED_HARDIRQS_READ))
2350                                 return 0;
2351                         if (curr->softirqs_enabled)
2352                                 if (!mark_lock(curr, hlock,
2353                                                 LOCK_ENABLED_SOFTIRQS_READ))
2354                                         return 0;
2355                 } else {
2356                         if (!mark_lock(curr, hlock,
2357                                         LOCK_ENABLED_HARDIRQS))
2358                                 return 0;
2359                         if (curr->softirqs_enabled)
2360                                 if (!mark_lock(curr, hlock,
2361                                                 LOCK_ENABLED_SOFTIRQS))
2362                                         return 0;
2363                 }
2364         }
2365
2366         return 1;
2367 }
2368
2369 static int separate_irq_context(struct task_struct *curr,
2370                 struct held_lock *hlock)
2371 {
2372         unsigned int depth = curr->lockdep_depth;
2373
2374         /*
2375          * Keep track of points where we cross into an interrupt context:
2376          */
2377         hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2378                                 curr->softirq_context;
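        /*
         * (A value of 0 means plain process context; hardirq and softirq
         * contexts yield non-zero values, so crossing a context boundary
         * shows up as a change in ->irq_context.)
         */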
2379         if (depth) {
2380                 struct held_lock *prev_hlock;
2381
2382                 prev_hlock = curr->held_locks + depth-1;
2383                 /*
2384                  * If we cross into another context, reset the
2385                  * hash key (this also prevents the checking and the
2386                  * adding of the dependency to 'prev'):
2387                  */
2388                 if (prev_hlock->irq_context != hlock->irq_context)
2389                         return 1;
2390         }
2391         return 0;
2392 }
2393
2394 #else
2395
2396 static inline
2397 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2398                 enum lock_usage_bit new_bit)
2399 {
2400         WARN_ON(1);
2401         return 1;
2402 }
2403
2404 static inline int mark_irqflags(struct task_struct *curr,
2405                 struct held_lock *hlock)
2406 {
2407         return 1;
2408 }
2409
2410 static inline int separate_irq_context(struct task_struct *curr,
2411                 struct held_lock *hlock)
2412 {
2413         return 0;
2414 }
2415
2416 #endif
2417
2418 /*
2419  * Mark a lock with a usage bit, and validate the state transition:
2420  */
2421 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2422                              enum lock_usage_bit new_bit)
2423 {
2424         unsigned int new_mask = 1 << new_bit, ret = 1;
2425
2426         /*
2427          * If already set then do not dirty the cacheline,
2428          * nor do any checks:
2429          */
2430         if (likely(hlock_class(this)->usage_mask & new_mask))
2431                 return 1;
2432
2433         if (!graph_lock())
2434                 return 0;
2435         /*
2436          * Make sure we didn't race:
2437          */
2438         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2439                 graph_unlock();
2440                 return 1;
2441         }
2442
2443         hlock_class(this)->usage_mask |= new_mask;
2444
2445         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2446                 return 0;
2447
2448         switch (new_bit) {
2449         case LOCK_USED_IN_HARDIRQ:
2450         case LOCK_USED_IN_SOFTIRQ:
2451         case LOCK_USED_IN_HARDIRQ_READ:
2452         case LOCK_USED_IN_SOFTIRQ_READ:
2453         case LOCK_ENABLED_HARDIRQS:
2454         case LOCK_ENABLED_SOFTIRQS:
2455         case LOCK_ENABLED_HARDIRQS_READ:
2456         case LOCK_ENABLED_SOFTIRQS_READ:
2457                 ret = mark_lock_irq(curr, this, new_bit);
2458                 if (!ret)
2459                         return 0;
2460                 break;
2461         case LOCK_USED:
2462                 debug_atomic_dec(&nr_unused_locks);
2463                 break;
2464         default:
2465                 if (!debug_locks_off_graph_unlock())
2466                         return 0;
2467                 WARN_ON(1);
2468                 return 0;
2469         }
2470
2471         graph_unlock();
2472
2473         /*
2474          * We must printk outside of the graph_lock:
2475          */
2476         if (ret == 2) {
2477                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2478                 print_lock(this);
2479                 print_irqtrace_events(curr);
2480                 dump_stack();
2481         }
2482
2483         return ret;
2484 }
2485
2486 /*
2487  * Initialize a lock instance's lock-class mapping info:
2488  */
2489 void lockdep_init_map(struct lockdep_map *lock, const char *name,
2490                       struct lock_class_key *key, int subclass)
2491 {
2492         if (unlikely(!debug_locks))
2493                 return;
2494
2495         if (DEBUG_LOCKS_WARN_ON(!key))
2496                 return;
2497         if (DEBUG_LOCKS_WARN_ON(!name))
2498                 return;
2499         /*
2500          * Sanity check, the lock-class key must be persistent:
2501          */
2502         if (!static_obj(key)) {
2503                 printk("BUG: key %p not in .data!\n", key);
2504                 DEBUG_LOCKS_WARN_ON(1);
2505                 return;
2506         }
2507         lock->name = name;
2508         lock->key = key;
2509         lock->class_cache = NULL;
2510 #ifdef CONFIG_LOCK_STAT
2511         lock->cpu = raw_smp_processor_id();
2512 #endif
2513         if (subclass)
2514                 register_lock_class(lock, subclass, 1);
2515 }
2516
2517 EXPORT_SYMBOL_GPL(lockdep_init_map);
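
/*
 * Illustrative use only (not code from this file): a locking primitive's
 * init path would typically pass a static key, e.g.:
 *
 *      static struct lock_class_key my_key;
 *      ...
 *      lockdep_init_map(&obj->lock.dep_map, "obj->lock", &my_key, 0);
 *
 * The key must be a static object so that the static_obj() check above
 * accepts it.
 */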
2518
2519 /*
2520  * This gets called for every mutex_lock*()/spin_lock*() operation.
2521  * We maintain the dependency maps and validate the locking attempt:
2522  */
2523 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2524                           int trylock, int read, int check, int hardirqs_off,
2525                           struct lockdep_map *nest_lock, unsigned long ip)
2526 {
2527         struct task_struct *curr = current;
2528         struct lock_class *class = NULL;
2529         struct held_lock *hlock;
2530         unsigned int depth, id;
2531         int chain_head = 0;
2532         u64 chain_key;
2533
2534         if (!prove_locking)
2535                 check = 1;
2536
2537         if (unlikely(!debug_locks))
2538                 return 0;
2539
2540         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2541                 return 0;
2542
2543         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2544                 debug_locks_off();
2545                 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2546                 printk("turning off the locking correctness validator.\n");
2547                 return 0;
2548         }
2549
2550         if (!subclass)
2551                 class = lock->class_cache;
2552         /*
2553          * Not cached yet or subclass?
2554          */
2555         if (unlikely(!class)) {
2556                 class = register_lock_class(lock, subclass, 0);
2557                 if (!class)
2558                         return 0;
2559         }
2560         debug_atomic_inc((atomic_t *)&class->ops);
2561         if (very_verbose(class)) {
2562                 printk("\nacquire class [%p] %s", class->key, class->name);
2563                 if (class->name_version > 1)
2564                         printk("#%d", class->name_version);
2565                 printk("\n");
2566                 dump_stack();
2567         }
2568
2569         /*
2570          * Add the lock to the list of currently held locks.
2571          * (we don't increase the depth just yet, up until the
2572          * dependency checks are done)
2573          */
2574         depth = curr->lockdep_depth;
2575         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2576                 return 0;
2577
2578         hlock = curr->held_locks + depth;
2579         if (DEBUG_LOCKS_WARN_ON(!class))
2580                 return 0;
2581         hlock->class_idx = class - lock_classes + 1;
2582         hlock->acquire_ip = ip;
2583         hlock->instance = lock;
2584         hlock->nest_lock = nest_lock;
2585         hlock->trylock = trylock;
2586         hlock->read = read;
2587         hlock->check = check;
2588         hlock->hardirqs_off = !!hardirqs_off;
2589 #ifdef CONFIG_LOCK_STAT
2590         hlock->waittime_stamp = 0;
2591         hlock->holdtime_stamp = sched_clock();
2592 #endif
2593
2594         if (check == 2 && !mark_irqflags(curr, hlock))
2595                 return 0;
2596
2597         /* mark it as used: */
2598         if (!mark_lock(curr, hlock, LOCK_USED))
2599                 return 0;
2600
2601         /*
2602          * Calculate the chain hash: it's the combined hash of all the
2603          * lock keys along the dependency chain. We save the hash value
2604          * at every step so that we can get the current hash easily
2605          * after unlock. The chain hash is then used to cache dependency
2606          * results.
2607          *
2608          * The 'key ID' (the class index) is used to drive the hash,
2609          * rather than class->key, because it is the most compact value.
2610          */
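        /*
         * Illustration (not code from this file): if locks A, B, C are
         * taken in that order, the chain key evolves as
         *
         *      key0 = 0
         *      key1 = iterate_chain_key(key0, id_A)
         *      key2 = iterate_chain_key(key1, id_B)
         *      key3 = iterate_chain_key(key2, id_C)
         *
         * and each held_lock remembers the key value that preceded it in
         * ->prev_chain_key, so releases can roll the key back.
         */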
2611         id = class - lock_classes;
2612         if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2613                 return 0;
2614
2615         chain_key = curr->curr_chain_key;
2616         if (!depth) {
2617                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2618                         return 0;
2619                 chain_head = 1;
2620         }
2621
2622         hlock->prev_chain_key = chain_key;
2623         if (separate_irq_context(curr, hlock)) {
2624                 chain_key = 0;
2625                 chain_head = 1;
2626         }
2627         chain_key = iterate_chain_key(chain_key, id);
2628
2629         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2630                 return 0;
2631
2632         curr->curr_chain_key = chain_key;
2633         curr->lockdep_depth++;
2634         check_chain_key(curr);
2635 #ifdef CONFIG_DEBUG_LOCKDEP
2636         if (unlikely(!debug_locks))
2637                 return 0;
2638 #endif
2639         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2640                 debug_locks_off();
2641                 printk("BUG: MAX_LOCK_DEPTH too low!\n");
2642                 printk("turning off the locking correctness validator.\n");
2643                 return 0;
2644         }
2645
2646         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2647                 max_lockdep_depth = curr->lockdep_depth;
2648
2649         return 1;
2650 }
2651
2652 static int
2653 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2654                            unsigned long ip)
2655 {
2656         if (!debug_locks_off())
2657                 return 0;
2658         if (debug_locks_silent)
2659                 return 0;
2660
2661         printk("\n=====================================\n");
2662         printk(  "[ BUG: bad unlock balance detected! ]\n");
2663         printk(  "-------------------------------------\n");
2664         printk("%s/%d is trying to release lock (",
2665                 curr->comm, task_pid_nr(curr));
2666         print_lockdep_cache(lock);
2667         printk(") at:\n");
2668         print_ip_sym(ip);
2669         printk("but there are no more locks to release!\n");
2670         printk("\nother info that might help us debug this:\n");
2671         lockdep_print_held_locks(curr);
2672
2673         printk("\nstack backtrace:\n");
2674         dump_stack();
2675
2676         return 0;
2677 }
2678
2679 /*
2680  * Common debugging checks for both nested and non-nested unlock:
2681  */
2682 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2683                         unsigned long ip)
2684 {
2685         if (unlikely(!debug_locks))
2686                 return 0;
2687         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2688                 return 0;
2689
2690         if (curr->lockdep_depth <= 0)
2691                 return print_unlock_inbalance_bug(curr, lock, ip);
2692
2693         return 1;
2694 }
2695
2696 static int
2697 __lock_set_subclass(struct lockdep_map *lock,
2698                     unsigned int subclass, unsigned long ip)
2699 {
2700         struct task_struct *curr = current;
2701         struct held_lock *hlock, *prev_hlock;
2702         struct lock_class *class;
2703         unsigned int depth;
2704         int i;
2705
2706         depth = curr->lockdep_depth;
2707         if (DEBUG_LOCKS_WARN_ON(!depth))
2708                 return 0;
2709
2710         prev_hlock = NULL;
2711         for (i = depth-1; i >= 0; i--) {
2712                 hlock = curr->held_locks + i;
2713                 /*
2714                  * We must not cross into another context:
2715                  */
2716                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2717                         break;
2718                 if (hlock->instance == lock)
2719                         goto found_it;
2720                 prev_hlock = hlock;
2721         }
2722         return print_unlock_inbalance_bug(curr, lock, ip);
2723
2724 found_it:
2725         class = register_lock_class(lock, subclass, 0);
2726         hlock->class_idx = class - lock_classes + 1;
2727
2728         curr->lockdep_depth = i;
2729         curr->curr_chain_key = hlock->prev_chain_key;
2730
2731         for (; i < depth; i++) {
2732                 hlock = curr->held_locks + i;
2733                 if (!__lock_acquire(hlock->instance,
2734                         hlock_class(hlock)->subclass, hlock->trylock,
2735                                 hlock->read, hlock->check, hlock->hardirqs_off,
2736                                 hlock->nest_lock, hlock->acquire_ip))
2737                         return 0;
2738         }
2739
2740         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2741                 return 0;
2742         return 1;
2743 }
2744
2745 /*
2746  * Remove a lock from the list of currently held locks in a
2747  * potentially non-nested (out of order) manner. This is a
2748  * relatively rare operation, as all the unlock APIs default
2749  * to nested mode (which uses lock_release()):
2750  */
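/*
 * Example (illustrative): if a task holds A, B and C in that order and
 * releases B first, we find B in the middle of the held-lock stack,
 * truncate the stack at B and then re-__lock_acquire() C so that it
 * again sits directly on top of A.
 */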
2751 static int
2752 lock_release_non_nested(struct task_struct *curr,
2753                         struct lockdep_map *lock, unsigned long ip)
2754 {
2755         struct held_lock *hlock, *prev_hlock;
2756         unsigned int depth;
2757         int i;
2758
2759         /*
2760          * Check whether the lock exists in the current stack
2761          * of held locks:
2762          */
2763         depth = curr->lockdep_depth;
2764         if (DEBUG_LOCKS_WARN_ON(!depth))
2765                 return 0;
2766
2767         prev_hlock = NULL;
2768         for (i = depth-1; i >= 0; i--) {
2769                 hlock = curr->held_locks + i;
2770                 /*
2771                  * We must not cross into another context:
2772                  */
2773                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2774                         break;
2775                 if (hlock->instance == lock)
2776                         goto found_it;
2777                 prev_hlock = hlock;
2778         }
2779         return print_unlock_inbalance_bug(curr, lock, ip);
2780
2781 found_it:
2782         lock_release_holdtime(hlock);
2783
2784         /*
2785          * We have the right lock to unlock, 'hlock' points to it.
2786          * Now we remove it from the stack, and add back the other
2787          * entries (if any), recalculating the hash along the way:
2788          */
2789         curr->lockdep_depth = i;
2790         curr->curr_chain_key = hlock->prev_chain_key;
2791
2792         for (i++; i < depth; i++) {
2793                 hlock = curr->held_locks + i;
2794                 if (!__lock_acquire(hlock->instance,
2795                         hlock_class(hlock)->subclass, hlock->trylock,
2796                                 hlock->read, hlock->check, hlock->hardirqs_off,
2797                                 hlock->nest_lock, hlock->acquire_ip))
2798                         return 0;
2799         }
2800
2801         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2802                 return 0;
2803         return 1;
2804 }
2805
2806 /*
2807  * Remove a lock from the list of currently held locks - this gets
2808  * called on mutex_unlock()/spin_unlock*() (or on a failed
2809  * mutex_lock_interruptible()). This is done for unlocks that nest
2810  * perfectly. (i.e. the current top of the lock-stack is unlocked)
2811  */
2812 static int lock_release_nested(struct task_struct *curr,
2813                                struct lockdep_map *lock, unsigned long ip)
2814 {
2815         struct held_lock *hlock;
2816         unsigned int depth;
2817
2818         /*
2819          * Pop off the top of the lock stack:
2820          */
2821         depth = curr->lockdep_depth - 1;
2822         hlock = curr->held_locks + depth;
2823
2824         /*
2825          * Is the unlock non-nested:
2826          */
2827         if (hlock->instance != lock)
2828                 return lock_release_non_nested(curr, lock, ip);
2829         curr->lockdep_depth--;
2830
2831         if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2832                 return 0;
2833
2834         curr->curr_chain_key = hlock->prev_chain_key;
2835
2836         lock_release_holdtime(hlock);
2837
2838 #ifdef CONFIG_DEBUG_LOCKDEP
2839         hlock->prev_chain_key = 0;
2840         hlock->class_idx = 0;
2841         hlock->acquire_ip = 0;
2842         hlock->irq_context = 0;
2843 #endif
2844         return 1;
2845 }
2846
2847 /*
2848  * Remove a lock from the list of currently held locks - this gets
2849  * called on mutex_unlock()/spin_unlock*() (or on a failed
2850  * mutex_lock_interruptible()). Perfectly nested unlocks take the
2851  * fast path above; out-of-order ones fall back to the non-nested path.
2852  */
2853 static void
2854 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2855 {
2856         struct task_struct *curr = current;
2857
2858         if (!check_unlock(curr, lock, ip))
2859                 return;
2860
2861         if (nested) {
2862                 if (!lock_release_nested(curr, lock, ip))
2863                         return;
2864         } else {
2865                 if (!lock_release_non_nested(curr, lock, ip))
2866                         return;
2867         }
2868
2869         check_chain_key(curr);
2870 }
2871
2872 /*
2873  * Check whether we follow the irq-flags state precisely:
2874  */
2875 static void check_flags(unsigned long flags)
2876 {
2877 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
2878     defined(CONFIG_TRACE_IRQFLAGS)
2879         if (!debug_locks)
2880                 return;
2881
2882         if (irqs_disabled_flags(flags)) {
2883                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
2884                         printk("possible reason: unannotated irqs-off.\n");
2885                 }
2886         } else {
2887                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
2888                         printk("possible reason: unannotated irqs-on.\n");
2889                 }
2890         }
2891
2892         /*
2893          * We don't accurately track softirq state in e.g.
2894          * hardirq contexts (such as on 4KSTACKS), so only
2895          * check if not in hardirq contexts:
2896          */
2897         if (!hardirq_count()) {
2898                 if (softirq_count())
2899                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2900                 else
2901                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2902         }
2903
2904         if (!debug_locks)
2905                 print_irqtrace_events(current);
2906 #endif
2907 }
2908
2909 void
2910 lock_set_subclass(struct lockdep_map *lock,
2911                   unsigned int subclass, unsigned long ip)
2912 {
2913         unsigned long flags;
2914
2915         if (unlikely(current->lockdep_recursion))
2916                 return;
2917
2918         raw_local_irq_save(flags);
2919         current->lockdep_recursion = 1;
2920         check_flags(flags);
2921         if (__lock_set_subclass(lock, subclass, ip))
2922                 check_chain_key(current);
2923         current->lockdep_recursion = 0;
2924         raw_local_irq_restore(flags);
2925 }
2926
2927 EXPORT_SYMBOL_GPL(lock_set_subclass);
2928
2929 /*
2930  * We are not always called with irqs disabled - do that here,
2931  * and also avoid lockdep recursion:
2932  */
2933 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2934                           int trylock, int read, int check,
2935                           struct lockdep_map *nest_lock, unsigned long ip)
2936 {
2937         unsigned long flags;
2938
2939         if (unlikely(current->lockdep_recursion))
2940                 return;
2941
2942         raw_local_irq_save(flags);
2943         check_flags(flags);
2944
2945         current->lockdep_recursion = 1;
2946         __lock_acquire(lock, subclass, trylock, read, check,
2947                        irqs_disabled_flags(flags), nest_lock, ip);
2948         current->lockdep_recursion = 0;
2949         raw_local_irq_restore(flags);
2950 }
2951
2952 EXPORT_SYMBOL_GPL(lock_acquire);
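
/*
 * For illustration only: the annotation wrappers in <linux/lockdep.h>
 * (spin_acquire(), mutex_acquire(), rwsem_acquire(), ...) funnel into
 * this entry point with the appropriate trylock/read/check arguments.
 */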
2953
2954 void lock_release(struct lockdep_map *lock, int nested,
2955                           unsigned long ip)
2956 {
2957         unsigned long flags;
2958
2959         if (unlikely(current->lockdep_recursion))
2960                 return;
2961
2962         raw_local_irq_save(flags);
2963         check_flags(flags);
2964         current->lockdep_recursion = 1;
2965         __lock_release(lock, nested, ip);
2966         current->lockdep_recursion = 0;
2967         raw_local_irq_restore(flags);
2968 }
2969
2970 EXPORT_SYMBOL_GPL(lock_release);
2971
2972 #ifdef CONFIG_LOCK_STAT
2973 static int
2974 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
2975                            unsigned long ip)
2976 {
2977         if (!debug_locks_off())
2978                 return 0;
2979         if (debug_locks_silent)
2980                 return 0;
2981
2982         printk("\n=================================\n");
2983         printk(  "[ BUG: bad contention detected! ]\n");
2984         printk(  "---------------------------------\n");
2985         printk("%s/%d is trying to contend lock (",
2986                 curr->comm, task_pid_nr(curr));
2987         print_lockdep_cache(lock);
2988         printk(") at:\n");
2989         print_ip_sym(ip);
2990         printk("but there are no locks held!\n");
2991         printk("\nother info that might help us debug this:\n");
2992         lockdep_print_held_locks(curr);
2993
2994         printk("\nstack backtrace:\n");
2995         dump_stack();
2996
2997         return 0;
2998 }
2999
3000 static void
3001 __lock_contended(struct lockdep_map *lock, unsigned long ip)
3002 {
3003         struct task_struct *curr = current;
3004         struct held_lock *hlock, *prev_hlock;
3005         struct lock_class_stats *stats;
3006         unsigned int depth;
3007         int i, contention_point, contending_point;
3008
3009         depth = curr->lockdep_depth;
3010         if (DEBUG_LOCKS_WARN_ON(!depth))
3011                 return;
3012
3013         prev_hlock = NULL;
3014         for (i = depth-1; i >= 0; i--) {
3015                 hlock = curr->held_locks + i;
3016                 /*
3017                  * We must not cross into another context:
3018                  */
3019                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3020                         break;
3021                 if (hlock->instance == lock)
3022                         goto found_it;
3023                 prev_hlock = hlock;
3024         }
3025         print_lock_contention_bug(curr, lock, ip);
3026         return;
3027
3028 found_it:
3029         hlock->waittime_stamp = sched_clock();
3030
3031         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3032         contending_point = lock_point(hlock_class(hlock)->contending_point,
3033                                       lock->ip);
3034
3035         stats = get_lock_stats(hlock_class(hlock));
3036         if (contention_point < LOCKSTAT_POINTS)
3037                 stats->contention_point[contention_point]++;
3038         if (contending_point < LOCKSTAT_POINTS)
3039                 stats->contending_point[contending_point]++;
3040         if (lock->cpu != smp_processor_id())
3041                 stats->bounces[bounce_contended + !!hlock->read]++;
3042         put_lock_stats(stats);
3043 }
3044
3045 static void
3046 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3047 {
3048         struct task_struct *curr = current;
3049         struct held_lock *hlock, *prev_hlock;
3050         struct lock_class_stats *stats;
3051         unsigned int depth;
3052         u64 now;
3053         s64 waittime = 0;
3054         int i, cpu;
3055
3056         depth = curr->lockdep_depth;
3057         if (DEBUG_LOCKS_WARN_ON(!depth))
3058                 return;
3059
3060         prev_hlock = NULL;
3061         for (i = depth-1; i >= 0; i--) {
3062                 hlock = curr->held_locks + i;
3063                 /*
3064                  * We must not cross into another context:
3065                  */
3066                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3067                         break;
3068                 if (hlock->instance == lock)
3069                         goto found_it;
3070                 prev_hlock = hlock;
3071         }
3072         print_lock_contention_bug(curr, lock, _RET_IP_);
3073         return;
3074
3075 found_it:
3076         cpu = smp_processor_id();
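        /*
         * If __lock_contended() stamped the start of the wait, account the
         * time we spent waiting and restart the hold-time clock now that
         * the lock has actually been acquired.
         */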
3077         if (hlock->waittime_stamp) {
3078                 now = sched_clock();
3079                 waittime = now - hlock->waittime_stamp;
3080                 hlock->holdtime_stamp = now;
3081         }
3082
3083         stats = get_lock_stats(hlock_class(hlock));
3084         if (waittime) {
3085                 if (hlock->read)
3086                         lock_time_inc(&stats->read_waittime, waittime);
3087                 else
3088                         lock_time_inc(&stats->write_waittime, waittime);
3089         }
3090         if (lock->cpu != cpu)
3091                 stats->bounces[bounce_acquired + !!hlock->read]++;
3092         put_lock_stats(stats);
3093
3094         lock->cpu = cpu;
3095         lock->ip = ip;
3096 }
3097
3098 void lock_contended(struct lockdep_map *lock, unsigned long ip)
3099 {
3100         unsigned long flags;
3101
3102         if (unlikely(!lock_stat))
3103                 return;
3104
3105         if (unlikely(current->lockdep_recursion))
3106                 return;
3107
3108         raw_local_irq_save(flags);
3109         check_flags(flags);
3110         current->lockdep_recursion = 1;
3111         __lock_contended(lock, ip);
3112         current->lockdep_recursion = 0;
3113         raw_local_irq_restore(flags);
3114 }
3115 EXPORT_SYMBOL_GPL(lock_contended);
3116
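/*
 * Entry point called once the contended lock has actually been taken;
 * see lock_contended() above. Both entry points return early when lock
 * statistics are disabled or when we are already recursing inside
 * lockdep itself.
 */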
3117 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3118 {
3119         unsigned long flags;
3120
3121         if (unlikely(!lock_stat))
3122                 return;
3123
3124         if (unlikely(current->lockdep_recursion))
3125                 return;
3126
3127         raw_local_irq_save(flags);
3128         check_flags(flags);
3129         current->lockdep_recursion = 1;
3130         __lock_acquired(lock, ip);
3131         current->lockdep_recursion = 0;
3132         raw_local_irq_restore(flags);
3133 }
3134 EXPORT_SYMBOL_GPL(lock_acquired);
3135 #endif
3136
3137 /*
3138  * Used by the testsuite to sanitize the validator state
3139  * after a simulated failure:
3140  */
3141
3142 void lockdep_reset(void)
3143 {
3144         unsigned long flags;
3145         int i;
3146
3147         raw_local_irq_save(flags);
3148         current->curr_chain_key = 0;
3149         current->lockdep_depth = 0;
3150         current->lockdep_recursion = 0;
3151         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3152         nr_hardirq_chains = 0;
3153         nr_softirq_chains = 0;
3154         nr_process_chains = 0;
3155         debug_locks = 1;
3156         for (i = 0; i < CHAINHASH_SIZE; i++)
3157                 INIT_LIST_HEAD(chainhash_table + i);
3158         raw_local_irq_restore(flags);
3159 }
3160
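/*
 * Remove a class from the dependency graph: delete every list entry
 * that refers to it, unhash it and clear its key so it can no longer
 * be looked up.
 */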
3161 static void zap_class(struct lock_class *class)
3162 {
3163         int i;
3164
3165         /*
3166          * Remove all dependencies this lock is
3167          * involved in:
3168          */
3169         for (i = 0; i < nr_list_entries; i++) {
3170                 if (list_entries[i].class == class)
3171                         list_del_rcu(&list_entries[i].entry);
3172         }
3173         /*
3174          * Unhash the class and remove it from the all_lock_classes list:
3175          */
3176         list_del_rcu(&class->hash_entry);
3177         list_del_rcu(&class->lock_entry);
3178
3179         class->key = NULL;
3180 }
3181
3182 static inline int within(const void *addr, void *start, unsigned long size)
3183 {
3184         return addr >= start && addr < start + size;
3185 }
3186
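/*
 * Called when a range of kernel memory (typically the text/data of a
 * module being unloaded) is about to be freed: zap every lock class
 * whose key or name lies inside that range, so that no stale pointers
 * remain in the lockdep data structures.
 */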
3187 void lockdep_free_key_range(void *start, unsigned long size)
3188 {
3189         struct lock_class *class, *next;
3190         struct list_head *head;
3191         unsigned long flags;
3192         int i;
3193         int locked;
3194
3195         raw_local_irq_save(flags);
3196         locked = graph_lock();
3197
3198         /*
3199          * Unhash all classes that were created by this module:
3200          */
3201         for (i = 0; i < CLASSHASH_SIZE; i++) {
3202                 head = classhash_table + i;
3203                 if (list_empty(head))
3204                         continue;
3205                 list_for_each_entry_safe(class, next, head, hash_entry) {
3206                         if (within(class->key, start, size))
3207                                 zap_class(class);
3208                         else if (within(class->name, start, size))
3209                                 zap_class(class);
3210                 }
3211         }
3212
3213         if (locked)
3214                 graph_unlock();
3215         raw_local_irq_restore(flags);
3216 }
3217
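/*
 * Forget everything the validator knows about one particular lock: zap
 * the class registered for each of its subclasses, then verify that no
 * class still maps to this lockdep_map.
 */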
3218 void lockdep_reset_lock(struct lockdep_map *lock)
3219 {
3220         struct lock_class *class, *next;
3221         struct list_head *head;
3222         unsigned long flags;
3223         int i, j;
3224         int locked;
3225
3226         raw_local_irq_save(flags);
3227
3228         /*
3229          * Remove all classes this lock might have:
3230          */
3231         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3232                 /*
3233                  * If a class exists for this subclass, look it up and zap it:
3234                  */
3235                 class = look_up_lock_class(lock, j);
3236                 if (class)
3237                         zap_class(class);
3238         }
3239         /*
3240          * Debug check: in the end all mapped classes should
3241          * be gone.
3242          */
3243         locked = graph_lock();
3244         for (i = 0; i < CLASSHASH_SIZE; i++) {
3245                 head = classhash_table + i;
3246                 if (list_empty(head))
3247                         continue;
3248                 list_for_each_entry_safe(class, next, head, hash_entry) {
3249                         if (unlikely(class == lock->class_cache)) {
3250                                 if (debug_locks_off_graph_unlock())
3251                                         WARN_ON(1);
3252                                 goto out_restore;
3253                         }
3254                 }
3255         }
3256         if (locked)
3257                 graph_unlock();
3258
3259 out_restore:
3260         raw_local_irq_restore(flags);
3261 }
3262
3263 void lockdep_init(void)
3264 {
3265         int i;
3266
3267         /*
3268          * Some architectures have their own start_kernel()
3269          * code which calls lockdep_init(), while we also
3270          * call lockdep_init() from start_kernel() itself,
3271          * and we want to initialize the hashes only once:
3272          */
3273         if (lockdep_initialized)
3274                 return;
3275
3276         for (i = 0; i < CLASSHASH_SIZE; i++)
3277                 INIT_LIST_HEAD(classhash_table + i);
3278
3279         for (i = 0; i < CHAINHASH_SIZE; i++)
3280                 INIT_LIST_HEAD(chainhash_table + i);
3281
3282         lockdep_initialized = 1;
3283 }
3284
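/*
 * Print the compile-time limits of the lockdep data structures and an
 * estimate of the memory they consume.
 */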
3285 void __init lockdep_info(void)
3286 {
3287         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3288
3289         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3290         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3291         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3292         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3293         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3294         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3295         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3296
3297         printk(" memory used by lock dependency info: %lu kB\n",
3298                 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3299                 sizeof(struct list_head) * CLASSHASH_SIZE +
3300                 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3301                 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3302                 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
3303
3304         printk(" per task-struct memory footprint: %lu bytes\n",
3305                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3306
3307 #ifdef CONFIG_DEBUG_LOCKDEP
3308         if (lockdep_init_error) {
3309                 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3310                 printk("Call stack leading to lockdep invocation was:\n");
3311                 print_stack_trace(&lockdep_init_trace, 0);
3312         }
3313 #endif
3314 }
3315
3316 static void
3317 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3318                      const void *mem_to, struct held_lock *hlock)
3319 {
3320         if (!debug_locks_off())
3321                 return;
3322         if (debug_locks_silent)
3323                 return;
3324
3325         printk("\n=========================\n");
3326         printk(  "[ BUG: held lock freed! ]\n");
3327         printk(  "-------------------------\n");
3328         printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3329                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3330         print_lock(hlock);
3331         lockdep_print_held_locks(curr);
3332
3333         printk("\nstack backtrace:\n");
3334         dump_stack();
3335 }
3336
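/*
 * True if the memory range [mem_from, mem_from + mem_len) and the lock
 * object at [lock_from, lock_from + lock_len) do not overlap.
 */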
3337 static inline int not_in_range(const void *mem_from, unsigned long mem_len,
3338                                 const void *lock_from, unsigned long lock_len)
3339 {
3340         return lock_from + lock_len <= mem_from ||
3341                 mem_from + mem_len <= lock_from;
3342 }
3343
3344 /*
3345  * Called when kernel memory is freed (or unmapped), or if a lock
3346  * is destroyed or reinitialized - this code checks whether there is
3347  * any held lock in the memory range <mem_from> to <mem_from> + <mem_len>:
3348  */
3349 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3350 {
3351         struct task_struct *curr = current;
3352         struct held_lock *hlock;
3353         unsigned long flags;
3354         int i;
3355
3356         if (unlikely(!debug_locks))
3357                 return;
3358
3359         local_irq_save(flags);
3360         for (i = 0; i < curr->lockdep_depth; i++) {
3361                 hlock = curr->held_locks + i;
3362
3363                 if (not_in_range(mem_from, mem_len, hlock->instance,
3364                                         sizeof(*hlock->instance)))
3365                         continue;
3366
3367                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3368                 break;
3369         }
3370         local_irq_restore(flags);
3371 }
3372 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
3373
3374 static void print_held_locks_bug(struct task_struct *curr)
3375 {
3376         if (!debug_locks_off())
3377                 return;
3378         if (debug_locks_silent)
3379                 return;
3380
3381         printk("\n=====================================\n");
3382         printk(  "[ BUG: lock held at task exit time! ]\n");
3383         printk(  "-------------------------------------\n");
3384         printk("%s/%d is exiting with locks still held!\n",
3385                 curr->comm, task_pid_nr(curr));
3386         lockdep_print_held_locks(curr);
3387
3388         printk("\nstack backtrace:\n");
3389         dump_stack();
3390 }
3391
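/*
 * Warn when a task still holds locks at exit time:
 */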
3392 void debug_check_no_locks_held(struct task_struct *task)
3393 {
3394         if (unlikely(task->lockdep_depth > 0))
3395                 print_held_locks_bug(task);
3396 }
3397
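/*
 * Walk the task list and print the locks held by every task in the
 * system (see the tasklist_lock handling below for the case where the
 * task list itself is wedged).
 */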
3398 void debug_show_all_locks(void)
3399 {
3400         struct task_struct *g, *p;
3401         int count = 10;
3402         int unlock = 1;
3403
3404         if (unlikely(!debug_locks)) {
3405                 printk("INFO: lockdep is turned off.\n");
3406                 return;
3407         }
3408         printk("\nShowing all locks held in the system:\n");
3409
3410         /*
3411          * Here we try to get the tasklist_lock as hard as possible;
3412          * if we do not succeed within 2 seconds we ignore it (but keep
3413          * trying). This is to enable a debug printout even if a
3414          * tasklist_lock-holding task deadlocks or crashes.
3415          */
3416 retry:
3417         if (!read_trylock(&tasklist_lock)) {
3418                 if (count == 10)
3419                         printk("hm, tasklist_lock locked, retrying... ");
3420                 if (count) {
3421                         count--;
3422                         printk(" #%d", 10-count);
3423                         mdelay(200);
3424                         goto retry;
3425                 }
3426                 printk(" ignoring it.\n");
3427                 unlock = 0;
3428         } else {
3429                 if (count != 10)
3430                         printk(KERN_CONT " locked it.\n");
3431         }
3432
3433         do_each_thread(g, p) {
3434                 /*
3435                  * It's only reliable to print a task's held locks
3436                  * if the task is sleeping (or if it is the current
3437                  * task):
3438                  */
3439                 if (p->state == TASK_RUNNING && p != current)
3440                         continue;
3441                 if (p->lockdep_depth)
3442                         lockdep_print_held_locks(p);
3443                 if (!unlock)
3444                         if (read_trylock(&tasklist_lock))
3445                                 unlock = 1;
3446         } while_each_thread(g, p);
3447
3448         printk("\n");
3449         printk("=============================================\n\n");
3450
3451         if (unlock)
3452                 read_unlock(&tasklist_lock);
3453 }
3454
3455 EXPORT_SYMBOL_GPL(debug_show_all_locks);
3456
3457 /*
3458  * Careful: only use this function if you are sure that
3459  * the task cannot run in parallel!
3460  */
3461 void __debug_show_held_locks(struct task_struct *task)
3462 {
3463         if (unlikely(!debug_locks)) {
3464                 printk("INFO: lockdep is turned off.\n");
3465                 return;
3466         }
3467         lockdep_print_held_locks(task);
3468 }
3469 EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3470
3471 void debug_show_held_locks(struct task_struct *task)
3472 {
3473         __debug_show_held_locks(task);
3474 }
3475
3476 EXPORT_SYMBOL_GPL(debug_show_held_locks);
3477
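/*
 * Called on the return-to-user-space path: the kernel should never
 * return to user space with locks still held, so complain if this
 * task does.
 */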
3478 void lockdep_sys_exit(void)
3479 {
3480         struct task_struct *curr = current;
3481
3482         if (unlikely(curr->lockdep_depth)) {
3483                 if (!debug_locks_off())
3484                         return;
3485                 printk("\n================================================\n");
3486                 printk(  "[ BUG: lock held when returning to user space! ]\n");
3487                 printk(  "------------------------------------------------\n");
3488                 printk("%s/%d is leaving the kernel with locks still held!\n",
3489                                 curr->comm, curr->pid);
3490                 lockdep_print_held_locks(curr);
3491         }
3492 }