blob: 630be9ac62533504818f37247a5f1d02b937d68d [file] [log] [blame]
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001/*
2 * kernel/lockdep.c
3 *
4 * Runtime locking correctness validator
5 *
6 * Started by Ingo Molnar:
7 *
Peter Zijlstra4b32d0a2007-07-19 01:48:59 -07008 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
Peter Zijlstra90eec102015-11-16 11:08:45 +01009 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
Ingo Molnarfbb9ce952006-07-03 00:24:50 -070010 *
11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs:
13 *
14 * - lock inversion scenarios
15 * - circular lock dependencies
16 * - hardirq/softirq safe/unsafe locking bugs
17 *
18 * Bugs are reported even if the current locking scenario does not cause
19 * any deadlock at this point.
20 *
21 * I.e. if anytime in the past two locks were taken in a different order,
22 * even if it happened for another task, even if those were different
23 * locks (but of the same class as this lock), this code will detect it.
24 *
25 * Thanks to Arjan van de Ven for coming up with the initial idea of
26 * mapping lock dependencies runtime.
27 */
Steven Rostedta5e25882008-12-02 15:34:05 -050028#define DISABLE_BRANCH_PROFILING
Ingo Molnarfbb9ce952006-07-03 00:24:50 -070029#include <linux/mutex.h>
30#include <linux/sched.h>
Ingo Molnare6017572017-02-01 16:36:40 +010031#include <linux/sched/clock.h>
Ingo Molnar29930022017-02-08 18:51:36 +010032#include <linux/sched/task.h>
Nikolay Borisov6d7225f2017-05-03 14:53:05 -070033#include <linux/sched/mm.h>
Ingo Molnarfbb9ce952006-07-03 00:24:50 -070034#include <linux/delay.h>
35#include <linux/module.h>
36#include <linux/proc_fs.h>
37#include <linux/seq_file.h>
38#include <linux/spinlock.h>
39#include <linux/kallsyms.h>
40#include <linux/interrupt.h>
41#include <linux/stacktrace.h>
42#include <linux/debug_locks.h>
43#include <linux/irqflags.h>
Dave Jones99de0552006-09-29 02:00:10 -070044#include <linux/utsname.h>
Peter Zijlstra4b32d0a2007-07-19 01:48:59 -070045#include <linux/hash.h>
Steven Rostedt81d68a92008-05-12 21:20:42 +020046#include <linux/ftrace.h>
Peter Zijlstrab4b136f2009-01-29 14:50:36 +010047#include <linux/stringify.h>
Bart Van Asscheace35a72019-02-14 15:00:47 -080048#include <linux/bitmap.h>
Ming Leid588e462009-07-16 15:44:29 +020049#include <linux/bitops.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090050#include <linux/gfp.h>
Peter Zijlstrae7904a22015-08-01 19:25:08 +020051#include <linux/random.h>
Peter Zijlstradfaaf3f2016-05-30 18:31:33 +020052#include <linux/jhash.h>
Tejun Heo88f1c872018-01-22 14:00:55 -080053#include <linux/nmi.h>
Bart Van Asschea0b0fd52019-02-14 15:00:46 -080054#include <linux/rcupdate.h>
Peter Zijlstraaf012962009-07-16 15:44:29 +020055
Ingo Molnarfbb9ce952006-07-03 00:24:50 -070056#include <asm/sections.h>
57
58#include "lockdep_internals.h"
59
Steven Rostedta8d154b2009-04-10 09:36:00 -040060#define CREATE_TRACE_POINTS
Frederic Weisbecker67178762009-11-13 10:06:34 +010061#include <trace/events/lock.h>
Steven Rostedta8d154b2009-04-10 09:36:00 -040062
Peter Zijlstraf20786f2007-07-19 01:48:56 -070063#ifdef CONFIG_PROVE_LOCKING
64int prove_locking = 1;
65module_param(prove_locking, int, 0644);
66#else
67#define prove_locking 0
68#endif
69
70#ifdef CONFIG_LOCK_STAT
71int lock_stat = 1;
72module_param(lock_stat, int, 0644);
73#else
74#define lock_stat 0
75#endif
76
Bart Van Asscheb526b2e2019-02-14 15:00:51 -080077static bool check_data_structure_consistency;
78
Ingo Molnarfbb9ce952006-07-03 00:24:50 -070079/*
Ingo Molnar74c383f2006-12-13 00:34:43 -080080 * lockdep_lock: protects the lockdep graph, the hashes and the
81 * class/list/hash allocators.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -070082 *
83 * This is one of the rare exceptions where it's justified
84 * to use a raw spinlock - we really dont want the spinlock
Ingo Molnar74c383f2006-12-13 00:34:43 -080085 * code to recurse back into the lockdep code...
Ingo Molnarfbb9ce952006-07-03 00:24:50 -070086 */
Thomas Gleixneredc35bd2009-12-03 12:38:57 +010087static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
Bart Van Asschecdc84d72019-02-14 15:00:44 -080088static struct task_struct *lockdep_selftest_task_struct;
Ingo Molnar74c383f2006-12-13 00:34:43 -080089
90static int graph_lock(void)
91{
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010092 arch_spin_lock(&lockdep_lock);
Ingo Molnar74c383f2006-12-13 00:34:43 -080093 /*
94 * Make sure that if another CPU detected a bug while
95 * walking the graph we dont change it (while the other
96 * CPU is busy printing out stuff with the graph lock
97 * dropped already)
98 */
99 if (!debug_locks) {
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100100 arch_spin_unlock(&lockdep_lock);
Ingo Molnar74c383f2006-12-13 00:34:43 -0800101 return 0;
102 }
Steven Rostedtbb065af2008-05-12 21:21:00 +0200103 /* prevent any recursions within lockdep from causing deadlocks */
104 current->lockdep_recursion++;
Ingo Molnar74c383f2006-12-13 00:34:43 -0800105 return 1;
106}
107
108static inline int graph_unlock(void)
109{
Peter Zijlstra0119fee2011-09-02 01:30:29 +0200110 if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
111 /*
112 * The lockdep graph lock isn't locked while we expect it to
113 * be, we're confused now, bye!
114 */
Jarek Poplawski381a2292007-02-10 01:44:58 -0800115 return DEBUG_LOCKS_WARN_ON(1);
Peter Zijlstra0119fee2011-09-02 01:30:29 +0200116 }
Jarek Poplawski381a2292007-02-10 01:44:58 -0800117
Steven Rostedtbb065af2008-05-12 21:21:00 +0200118 current->lockdep_recursion--;
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100119 arch_spin_unlock(&lockdep_lock);
Ingo Molnar74c383f2006-12-13 00:34:43 -0800120 return 0;
121}
122
123/*
124 * Turn lock debugging off and return with 0 if it was off already,
125 * and also release the graph lock:
126 */
127static inline int debug_locks_off_graph_unlock(void)
128{
129 int ret = debug_locks_off();
130
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100131 arch_spin_unlock(&lockdep_lock);
Ingo Molnar74c383f2006-12-13 00:34:43 -0800132
133 return ret;
134}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700135
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700136unsigned long nr_list_entries;
Peter Zijlstraaf012962009-07-16 15:44:29 +0200137static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
Bart Van Asscheace35a72019-02-14 15:00:47 -0800138static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700139
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700140/*
141 * All data structures here are protected by the global debug_lock.
142 *
Bart Van Asschea0b0fd52019-02-14 15:00:46 -0800143 * nr_lock_classes is the number of elements of lock_classes[] that is
144 * in use.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700145 */
146unsigned long nr_lock_classes;
Bart Van Assche1431a5d2018-12-06 17:11:32 -0800147#ifndef CONFIG_DEBUG_LOCKDEP
148static
149#endif
Waiman Long8ca2b56c2018-10-03 13:07:18 -0400150struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700151
Dave Jonesf82b2172008-08-11 09:30:23 +0200152static inline struct lock_class *hlock_class(struct held_lock *hlock)
153{
154 if (!hlock->class_idx) {
Peter Zijlstra0119fee2011-09-02 01:30:29 +0200155 /*
156 * Someone passed in garbage, we give up.
157 */
Dave Jonesf82b2172008-08-11 09:30:23 +0200158 DEBUG_LOCKS_WARN_ON(1);
159 return NULL;
160 }
161 return lock_classes + hlock->class_idx - 1;
162}
163
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700164#ifdef CONFIG_LOCK_STAT
Peter Zijlstra25528212016-03-15 14:52:49 -0700165static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700166
Peter Zijlstra3365e7792009-10-09 10:12:41 +0200167static inline u64 lockstat_clock(void)
168{
Peter Zijlstrac6763292010-05-25 10:48:51 +0200169 return local_clock();
Peter Zijlstra3365e7792009-10-09 10:12:41 +0200170}
171
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +0200172static int lock_point(unsigned long points[], unsigned long ip)
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700173{
174 int i;
175
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +0200176 for (i = 0; i < LOCKSTAT_POINTS; i++) {
177 if (points[i] == 0) {
178 points[i] = ip;
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700179 break;
180 }
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +0200181 if (points[i] == ip)
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700182 break;
183 }
184
185 return i;
186}
187
Peter Zijlstra3365e7792009-10-09 10:12:41 +0200188static void lock_time_inc(struct lock_time *lt, u64 time)
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700189{
190 if (time > lt->max)
191 lt->max = time;
192
Frank Rowand109d71c2009-11-19 13:42:06 -0800193 if (time < lt->min || !lt->nr)
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700194 lt->min = time;
195
196 lt->total += time;
197 lt->nr++;
198}
199
Peter Zijlstrac46261d2007-07-19 01:48:57 -0700200static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
201{
Frank Rowand109d71c2009-11-19 13:42:06 -0800202 if (!src->nr)
203 return;
204
205 if (src->max > dst->max)
206 dst->max = src->max;
207
208 if (src->min < dst->min || !dst->nr)
209 dst->min = src->min;
210
Peter Zijlstrac46261d2007-07-19 01:48:57 -0700211 dst->total += src->total;
212 dst->nr += src->nr;
213}
214
215struct lock_class_stats lock_stats(struct lock_class *class)
216{
217 struct lock_class_stats stats;
218 int cpu, i;
219
220 memset(&stats, 0, sizeof(struct lock_class_stats));
221 for_each_possible_cpu(cpu) {
222 struct lock_class_stats *pcs =
Tejun Heo1871e522009-10-29 22:34:13 +0900223 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
Peter Zijlstrac46261d2007-07-19 01:48:57 -0700224
225 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
226 stats.contention_point[i] += pcs->contention_point[i];
227
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +0200228 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
229 stats.contending_point[i] += pcs->contending_point[i];
230
Peter Zijlstrac46261d2007-07-19 01:48:57 -0700231 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
232 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
233
234 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
235 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
Peter Zijlstra96645672007-07-19 01:49:00 -0700236
237 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
238 stats.bounces[i] += pcs->bounces[i];
Peter Zijlstrac46261d2007-07-19 01:48:57 -0700239 }
240
241 return stats;
242}
243
244void clear_lock_stats(struct lock_class *class)
245{
246 int cpu;
247
248 for_each_possible_cpu(cpu) {
249 struct lock_class_stats *cpu_stats =
Tejun Heo1871e522009-10-29 22:34:13 +0900250 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
Peter Zijlstrac46261d2007-07-19 01:48:57 -0700251
252 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
253 }
254 memset(class->contention_point, 0, sizeof(class->contention_point));
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +0200255 memset(class->contending_point, 0, sizeof(class->contending_point));
Peter Zijlstrac46261d2007-07-19 01:48:57 -0700256}
257
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700258static struct lock_class_stats *get_lock_stats(struct lock_class *class)
259{
Joel Fernandes (Google)01f38492018-07-30 15:24:21 -0700260 return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700261}
262
263static void lock_release_holdtime(struct held_lock *hlock)
264{
265 struct lock_class_stats *stats;
Peter Zijlstra3365e7792009-10-09 10:12:41 +0200266 u64 holdtime;
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700267
268 if (!lock_stat)
269 return;
270
Peter Zijlstra3365e7792009-10-09 10:12:41 +0200271 holdtime = lockstat_clock() - hlock->holdtime_stamp;
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700272
Dave Jonesf82b2172008-08-11 09:30:23 +0200273 stats = get_lock_stats(hlock_class(hlock));
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700274 if (hlock->read)
275 lock_time_inc(&stats->read_holdtime, holdtime);
276 else
277 lock_time_inc(&stats->write_holdtime, holdtime);
Peter Zijlstraf20786f2007-07-19 01:48:56 -0700278}
279#else
280static inline void lock_release_holdtime(struct held_lock *hlock)
281{
282}
283#endif
284
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700285/*
Bart Van Asschea0b0fd52019-02-14 15:00:46 -0800286 * We keep a global list of all lock classes. The list is only accessed with
287 * the lockdep spinlock lock held. free_lock_classes is a list with free
288 * elements. These elements are linked together by the lock_entry member in
289 * struct lock_class.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700290 */
291LIST_HEAD(all_lock_classes);
Bart Van Asschea0b0fd52019-02-14 15:00:46 -0800292static LIST_HEAD(free_lock_classes);
293
294/**
295 * struct pending_free - information about data structures about to be freed
296 * @zapped: Head of a list with struct lock_class elements.
Bart Van Asschede4643a2019-02-14 15:00:50 -0800297 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
298 * are about to be freed.
Bart Van Asschea0b0fd52019-02-14 15:00:46 -0800299 */
300struct pending_free {
301 struct list_head zapped;
Bart Van Asschede4643a2019-02-14 15:00:50 -0800302 DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
Bart Van Asschea0b0fd52019-02-14 15:00:46 -0800303};
304
305/**
306 * struct delayed_free - data structures used for delayed freeing
307 *
308 * A data structure for delayed freeing of data structures that may be
309 * accessed by RCU readers at the time these were freed.
310 *
311 * @rcu_head: Used to schedule an RCU callback for freeing data structures.
312 * @index: Index of @pf to which freed data structures are added.
313 * @scheduled: Whether or not an RCU callback has been scheduled.
314 * @pf: Array with information about data structures about to be freed.
315 */
316static struct delayed_free {
317 struct rcu_head rcu_head;
318 int index;
319 int scheduled;
320 struct pending_free pf[2];
321} delayed_free;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700322
323/*
324 * The lockdep classes are in a hash-table as well, for fast lookup:
325 */
326#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
327#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
Peter Zijlstra4b32d0a2007-07-19 01:48:59 -0700328#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700329#define classhashentry(key) (classhash_table + __classhashfn((key)))
330
Andrew Mortona63f38c2016-02-03 13:44:12 -0800331static struct hlist_head classhash_table[CLASSHASH_SIZE];
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700332
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700333/*
334 * We put the lock dependency chains into a hash-table as well, to cache
335 * their existence:
336 */
337#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
338#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
Peter Zijlstra4b32d0a2007-07-19 01:48:59 -0700339#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700340#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
341
Andrew Mortona63f38c2016-02-03 13:44:12 -0800342static struct hlist_head chainhash_table[CHAINHASH_SIZE];
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700343
344/*
345 * The hash key of the lock dependency chains is a hash itself too:
346 * it's a hash of all locks taken up to that lock, including that lock.
347 * It's a 64-bit hash, because it's important for the keys to be
348 * unique.
349 */
Peter Zijlstradfaaf3f2016-05-30 18:31:33 +0200350static inline u64 iterate_chain_key(u64 key, u32 idx)
351{
352 u32 k0 = key, k1 = key >> 32;
353
354 __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
355
356 return k0 | (u64)k1 << 32;
357}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700358
Steven Rostedt1d09daa2008-05-12 21:20:55 +0200359void lockdep_off(void)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700360{
361 current->lockdep_recursion++;
362}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700363EXPORT_SYMBOL(lockdep_off);
364
Steven Rostedt1d09daa2008-05-12 21:20:55 +0200365void lockdep_on(void)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700366{
367 current->lockdep_recursion--;
368}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700369EXPORT_SYMBOL(lockdep_on);
370
Bart Van Asschecdc84d72019-02-14 15:00:44 -0800371void lockdep_set_selftest_task(struct task_struct *task)
372{
373 lockdep_selftest_task_struct = task;
374}
375
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700376/*
377 * Debugging switches:
378 */
379
380#define VERBOSE 0
Ingo Molnar33e94e92006-12-13 00:34:41 -0800381#define VERY_VERBOSE 0
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700382
383#if VERBOSE
384# define HARDIRQ_VERBOSE 1
385# define SOFTIRQ_VERBOSE 1
386#else
387# define HARDIRQ_VERBOSE 0
388# define SOFTIRQ_VERBOSE 0
389#endif
390
Peter Zijlstrad92a8cf2017-03-03 10:13:38 +0100391#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700392/*
393 * Quick filtering for interesting events:
394 */
395static int class_filter(struct lock_class *class)
396{
Andi Kleenf9829cc2006-07-10 04:44:01 -0700397#if 0
398 /* Example */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700399 if (class->name_version == 1 &&
Andi Kleenf9829cc2006-07-10 04:44:01 -0700400 !strcmp(class->name, "lockname"))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700401 return 1;
402 if (class->name_version == 1 &&
Andi Kleenf9829cc2006-07-10 04:44:01 -0700403 !strcmp(class->name, "&struct->lockfield"))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700404 return 1;
Andi Kleenf9829cc2006-07-10 04:44:01 -0700405#endif
Ingo Molnara6640892006-12-13 00:34:39 -0800406 /* Filter everything else. 1 would be to allow everything else */
407 return 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700408}
409#endif
410
411static int verbose(struct lock_class *class)
412{
413#if VERBOSE
414 return class_filter(class);
415#endif
416 return 0;
417}
418
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700419/*
420 * Stack-trace: tightly packed array of stack backtrace
Ingo Molnar74c383f2006-12-13 00:34:43 -0800421 * addresses. Protected by the graph_lock.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700422 */
423unsigned long nr_stack_trace_entries;
424static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
425
Dave Jones2c522832013-04-25 13:40:02 -0400426static void print_lockdep_off(const char *bug_msg)
427{
428 printk(KERN_DEBUG "%s\n", bug_msg);
429 printk(KERN_DEBUG "turning off the locking correctness validator.\n");
Andreas Gruenbacheracf59372014-07-15 21:10:52 +0200430#ifdef CONFIG_LOCK_STAT
Dave Jones2c522832013-04-25 13:40:02 -0400431 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
Andreas Gruenbacheracf59372014-07-15 21:10:52 +0200432#endif
Dave Jones2c522832013-04-25 13:40:02 -0400433}
434
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700435static int save_trace(struct stack_trace *trace)
436{
437 trace->nr_entries = 0;
438 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
439 trace->entries = stack_trace + nr_stack_trace_entries;
440
Andi Kleen5a1b3992006-09-26 10:52:34 +0200441 trace->skip = 3;
Andi Kleen5a1b3992006-09-26 10:52:34 +0200442
Christoph Hellwigab1b6f02007-05-08 00:23:29 -0700443 save_stack_trace(trace);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700444
Peter Zijlstra4f84f432009-07-20 15:27:04 +0200445 /*
446 * Some daft arches put -1 at the end to indicate its a full trace.
447 *
448 * <rant> this is buggy anyway, since it takes a whole extra entry so a
449 * complete trace that maxes out the entries provided will be reported
450 * as incomplete, friggin useless </rant>
451 */
Luck, Tonyea5b41f2009-12-09 14:29:36 -0800452 if (trace->nr_entries != 0 &&
453 trace->entries[trace->nr_entries-1] == ULONG_MAX)
Peter Zijlstra4f84f432009-07-20 15:27:04 +0200454 trace->nr_entries--;
455
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700456 trace->max_entries = trace->nr_entries;
457
458 nr_stack_trace_entries += trace->nr_entries;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700459
Peter Zijlstra4f84f432009-07-20 15:27:04 +0200460 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
Ingo Molnar74c383f2006-12-13 00:34:43 -0800461 if (!debug_locks_off_graph_unlock())
462 return 0;
463
Dave Jones2c522832013-04-25 13:40:02 -0400464 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
Ingo Molnar74c383f2006-12-13 00:34:43 -0800465 dump_stack();
466
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700467 return 0;
468 }
469
470 return 1;
471}
472
473unsigned int nr_hardirq_chains;
474unsigned int nr_softirq_chains;
475unsigned int nr_process_chains;
476unsigned int max_lockdep_depth;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700477
478#ifdef CONFIG_DEBUG_LOCKDEP
479/*
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700480 * Various lockdep statistics:
481 */
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +0200482DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700483#endif
484
485/*
486 * Locking printouts:
487 */
488
Peter Zijlstrafabe9c42009-01-22 14:51:01 +0100489#define __USAGE(__STATE) \
Peter Zijlstrab4b136f2009-01-29 14:50:36 +0100490 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
491 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
492 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
493 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
Peter Zijlstrafabe9c42009-01-22 14:51:01 +0100494
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700495static const char *usage_str[] =
496{
Peter Zijlstrafabe9c42009-01-22 14:51:01 +0100497#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
498#include "lockdep_states.h"
499#undef LOCKDEP_STATE
500 [LOCK_USED] = "INITIAL USE",
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700501};
502
503const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
504{
Alexey Dobriyanffb45122007-05-08 00:28:41 -0700505 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700506}
507
Peter Zijlstra3ff176c2009-01-22 17:40:42 +0100508static inline unsigned long lock_flag(enum lock_usage_bit bit)
509{
510 return 1UL << bit;
511}
512
513static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
514{
515 char c = '.';
516
517 if (class->usage_mask & lock_flag(bit + 2))
518 c = '+';
519 if (class->usage_mask & lock_flag(bit)) {
520 c = '-';
521 if (class->usage_mask & lock_flag(bit + 2))
522 c = '?';
523 }
524
525 return c;
526}
527
Peter Zijlstraf510b232009-01-22 17:53:47 +0100528void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700529{
Peter Zijlstraf510b232009-01-22 17:53:47 +0100530 int i = 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700531
Peter Zijlstraf510b232009-01-22 17:53:47 +0100532#define LOCKDEP_STATE(__STATE) \
533 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
534 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
535#include "lockdep_states.h"
536#undef LOCKDEP_STATE
537
538 usage[i] = '\0';
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700539}
540
Steven Rostedte5e78d02011-11-02 20:24:16 -0400541static void __print_lock_name(struct lock_class *class)
Steven Rostedt3003eba2011-04-20 21:41:54 -0400542{
543 char str[KSYM_NAME_LEN];
544 const char *name;
545
546 name = class->name;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700547 if (!name) {
548 name = __get_key_name(class->key, str);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +0100549 printk(KERN_CONT "%s", name);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700550 } else {
Dmitry Vyukovf943fe02016-11-28 15:24:43 +0100551 printk(KERN_CONT "%s", name);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700552 if (class->name_version > 1)
Dmitry Vyukovf943fe02016-11-28 15:24:43 +0100553 printk(KERN_CONT "#%d", class->name_version);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700554 if (class->subclass)
Dmitry Vyukovf943fe02016-11-28 15:24:43 +0100555 printk(KERN_CONT "/%d", class->subclass);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700556 }
Steven Rostedte5e78d02011-11-02 20:24:16 -0400557}
558
559static void print_lock_name(struct lock_class *class)
560{
561 char usage[LOCK_USAGE_CHARS];
562
563 get_usage_chars(class, usage);
564
Dmitry Vyukovf943fe02016-11-28 15:24:43 +0100565 printk(KERN_CONT " (");
Steven Rostedte5e78d02011-11-02 20:24:16 -0400566 __print_lock_name(class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +0100567 printk(KERN_CONT "){%s}", usage);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700568}
569
570static void print_lockdep_cache(struct lockdep_map *lock)
571{
572 const char *name;
Tejun Heo9281ace2007-07-17 04:03:51 -0700573 char str[KSYM_NAME_LEN];
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700574
575 name = lock->name;
576 if (!name)
577 name = __get_key_name(lock->key->subkeys, str);
578
Dmitry Vyukovf943fe02016-11-28 15:24:43 +0100579 printk(KERN_CONT "%s", name);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700580}
581
582static void print_lock(struct held_lock *hlock)
583{
Peter Zijlstrad7bc3192015-04-15 17:11:57 +0200584 /*
585 * We can be called locklessly through debug_show_all_locks() so be
586 * extra careful, the hlock might have been released and cleared.
587 */
588 unsigned int class_idx = hlock->class_idx;
589
590 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
591 barrier();
592
593 if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
Dmitry Vyukovf943fe02016-11-28 15:24:43 +0100594 printk(KERN_CONT "<RELEASED>\n");
Peter Zijlstrad7bc3192015-04-15 17:11:57 +0200595 return;
596 }
597
Tetsuo Handab3c39752018-03-27 19:41:41 +0900598 printk(KERN_CONT "%p", hlock->instance);
Peter Zijlstrad7bc3192015-04-15 17:11:57 +0200599 print_lock_name(lock_classes + class_idx - 1);
Tetsuo Handab3c39752018-03-27 19:41:41 +0900600 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700601}
602
Tetsuo Handa8cc05c712018-04-06 19:41:19 +0900603static void lockdep_print_held_locks(struct task_struct *p)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700604{
Tetsuo Handa8cc05c712018-04-06 19:41:19 +0900605 int i, depth = READ_ONCE(p->lockdep_depth);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700606
Tetsuo Handa8cc05c712018-04-06 19:41:19 +0900607 if (!depth)
608 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
609 else
610 printk("%d lock%s held by %s/%d:\n", depth,
611 depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
612 /*
613 * It's not reliable to print a task's held locks if it's not sleeping
614 * and it's not the current task.
615 */
616 if (p->state == TASK_RUNNING && p != current)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700617 return;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700618 for (i = 0; i < depth; i++) {
619 printk(" #%d: ", i);
Tetsuo Handa8cc05c712018-04-06 19:41:19 +0900620 print_lock(p->held_locks + i);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700621 }
622}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700623
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +0100624static void print_kernel_ident(void)
Dave Jones99de0552006-09-29 02:00:10 -0700625{
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +0100626 printk("%s %.*s %s\n", init_utsname()->release,
Serge E. Hallyn96b644b2006-10-02 02:18:13 -0700627 (int)strcspn(init_utsname()->version, " "),
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +0100628 init_utsname()->version,
629 print_tainted());
Dave Jones99de0552006-09-29 02:00:10 -0700630}
631
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700632static int very_verbose(struct lock_class *class)
633{
634#if VERY_VERBOSE
635 return class_filter(class);
636#endif
637 return 0;
638}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700639
640/*
641 * Is this the address of a static object:
642 */
Sasha Levin8dce7a92013-06-13 18:41:16 -0400643#ifdef __KERNEL__
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700644static int static_obj(void *obj)
645{
646 unsigned long start = (unsigned long) &_stext,
647 end = (unsigned long) &_end,
648 addr = (unsigned long) obj;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700649
650 /*
651 * static variable?
652 */
653 if ((addr >= start) && (addr < end))
654 return 1;
655
Mike Frysinger2a9ad182009-09-22 16:44:16 -0700656 if (arch_is_kernel_data(addr))
657 return 1;
658
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700659 /*
Tejun Heo10fad5e2010-03-10 18:57:54 +0900660 * in-kernel percpu var?
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700661 */
Tejun Heo10fad5e2010-03-10 18:57:54 +0900662 if (is_kernel_percpu_address(addr))
663 return 1;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700664
665 /*
Tejun Heo10fad5e2010-03-10 18:57:54 +0900666 * module static or percpu var?
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700667 */
Tejun Heo10fad5e2010-03-10 18:57:54 +0900668 return is_module_address(addr) || is_module_percpu_address(addr);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700669}
Sasha Levin8dce7a92013-06-13 18:41:16 -0400670#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700671
672/*
673 * To make lock name printouts unique, we calculate a unique
Bart Van Asschefe27b0d2018-12-06 17:11:37 -0800674 * class->name_version generation counter. The caller must hold the graph
675 * lock.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700676 */
677static int count_matching_names(struct lock_class *new_class)
678{
679 struct lock_class *class;
680 int count = 0;
681
682 if (!new_class->name)
683 return 0;
684
Bart Van Asschefe27b0d2018-12-06 17:11:37 -0800685 list_for_each_entry(class, &all_lock_classes, lock_entry) {
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700686 if (new_class->key - new_class->subclass == class->key)
687 return class->name_version;
688 if (class->name && !strcmp(class->name, new_class->name))
689 count = max(count, class->name_version);
690 }
691
692 return count + 1;
693}
694
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700695static inline struct lock_class *
Matthew Wilcox08f36ff2018-01-17 07:14:13 -0800696look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700697{
698 struct lockdep_subclass_key *key;
Andrew Mortona63f38c2016-02-03 13:44:12 -0800699 struct hlist_head *hash_head;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700700 struct lock_class *class;
701
Hitoshi Mitake4ba053c2010-10-13 17:30:26 +0900702 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
703 debug_locks_off();
704 printk(KERN_ERR
705 "BUG: looking up invalid subclass: %u\n", subclass);
706 printk(KERN_ERR
707 "turning off the locking correctness validator.\n");
708 dump_stack();
709 return NULL;
710 }
711
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700712 /*
Matthew Wilcox64f29d12018-01-17 07:14:12 -0800713 * If it is not initialised then it has never been locked,
714 * so it won't be present in the hash table.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700715 */
Matthew Wilcox64f29d12018-01-17 07:14:12 -0800716 if (unlikely(!lock->key))
717 return NULL;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700718
719 /*
720 * NOTE: the class-key must be unique. For dynamic locks, a static
721 * lock_class_key variable is passed in through the mutex_init()
722 * (or spin_lock_init()) call - which acts as the key. For static
723 * locks we use the lock object itself as the key.
724 */
Peter Zijlstra4b32d0a2007-07-19 01:48:59 -0700725 BUILD_BUG_ON(sizeof(struct lock_class_key) >
726 sizeof(struct lockdep_map));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700727
728 key = lock->key->subkeys + subclass;
729
730 hash_head = classhashentry(key);
731
732 /*
Peter Zijlstra35a93932015-02-26 16:23:11 +0100733 * We do an RCU walk of the hash, see lockdep_free_key_range().
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700734 */
Peter Zijlstra35a93932015-02-26 16:23:11 +0100735 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
736 return NULL;
737
Andrew Mortona63f38c2016-02-03 13:44:12 -0800738 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
Peter Zijlstra4b32d0a2007-07-19 01:48:59 -0700739 if (class->key == key) {
Peter Zijlstra0119fee2011-09-02 01:30:29 +0200740 /*
741 * Huh! same key, different name? Did someone trample
742 * on some memory? We're most confused.
743 */
Peter Zijlstra4b32d0a2007-07-19 01:48:59 -0700744 WARN_ON_ONCE(class->name != lock->name);
Ingo Molnard6d897c2006-07-10 04:44:04 -0700745 return class;
Peter Zijlstra4b32d0a2007-07-19 01:48:59 -0700746 }
747 }
Ingo Molnard6d897c2006-07-10 04:44:04 -0700748
Matthew Wilcox64f29d12018-01-17 07:14:12 -0800749 return NULL;
750}
751
752/*
753 * Static locks do not have their class-keys yet - for them the key is
754 * the lock object itself. If the lock is in the per cpu area, the
755 * canonical address of the lock (per cpu offset removed) is used.
756 */
757static bool assign_lock_key(struct lockdep_map *lock)
758{
759 unsigned long can_addr, addr = (unsigned long)lock;
760
761 if (__is_kernel_percpu_address(addr, &can_addr))
762 lock->key = (void *)can_addr;
763 else if (__is_module_percpu_address(addr, &can_addr))
764 lock->key = (void *)can_addr;
765 else if (static_obj(lock))
766 lock->key = (void *)lock;
767 else {
768 /* Debug-check: all keys must be persistent! */
769 debug_locks_off();
770 pr_err("INFO: trying to register non-static key.\n");
771 pr_err("the code is fine but needs lockdep annotation.\n");
772 pr_err("turning off the locking correctness validator.\n");
773 dump_stack();
774 return false;
775 }
776
777 return true;
Ingo Molnard6d897c2006-07-10 04:44:04 -0700778}
779
Bart Van Asscheb526b2e2019-02-14 15:00:51 -0800780/* Check whether element @e occurs in list @h */
781static bool in_list(struct list_head *e, struct list_head *h)
782{
783 struct list_head *f;
784
785 list_for_each(f, h) {
786 if (e == f)
787 return true;
788 }
789
790 return false;
791}
792
793/*
794 * Check whether entry @e occurs in any of the locks_after or locks_before
795 * lists.
796 */
797static bool in_any_class_list(struct list_head *e)
798{
799 struct lock_class *class;
800 int i;
801
802 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
803 class = &lock_classes[i];
804 if (in_list(e, &class->locks_after) ||
805 in_list(e, &class->locks_before))
806 return true;
807 }
808 return false;
809}
810
811static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
812{
813 struct lock_list *e;
814
815 list_for_each_entry(e, h, entry) {
816 if (e->links_to != c) {
817 printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
818 c->name ? : "(?)",
819 (unsigned long)(e - list_entries),
820 e->links_to && e->links_to->name ?
821 e->links_to->name : "(?)",
822 e->class && e->class->name ? e->class->name :
823 "(?)");
824 return false;
825 }
826 }
827 return true;
828}
829
830static u16 chain_hlocks[];
831
832static bool check_lock_chain_key(struct lock_chain *chain)
833{
834#ifdef CONFIG_PROVE_LOCKING
835 u64 chain_key = 0;
836 int i;
837
838 for (i = chain->base; i < chain->base + chain->depth; i++)
839 chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
840 /*
841 * The 'unsigned long long' casts avoid that a compiler warning
842 * is reported when building tools/lib/lockdep.
843 */
844 if (chain->chain_key != chain_key)
845 printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
846 (unsigned long long)(chain - lock_chains),
847 (unsigned long long)chain->chain_key,
848 (unsigned long long)chain_key);
849 return chain->chain_key == chain_key;
850#else
851 return true;
852#endif
853}
854
855static bool in_any_zapped_class_list(struct lock_class *class)
856{
857 struct pending_free *pf;
858 int i;
859
860 for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf);
861 i++, pf++)
862 if (in_list(&class->lock_entry, &pf->zapped))
863 return true;
864
865 return false;
866}
867
868static bool check_data_structures(void)
869{
870 struct lock_class *class;
871 struct lock_chain *chain;
872 struct hlist_head *head;
873 struct lock_list *e;
874 int i;
875
876 /* Check whether all classes occur in a lock list. */
877 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
878 class = &lock_classes[i];
879 if (!in_list(&class->lock_entry, &all_lock_classes) &&
880 !in_list(&class->lock_entry, &free_lock_classes) &&
881 !in_any_zapped_class_list(class)) {
882 printk(KERN_INFO "class %px/%s is not in any class list\n",
883 class, class->name ? : "(?)");
884 return false;
885 return false;
886 }
887 }
888
889 /* Check whether all classes have valid lock lists. */
890 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
891 class = &lock_classes[i];
892 if (!class_lock_list_valid(class, &class->locks_before))
893 return false;
894 if (!class_lock_list_valid(class, &class->locks_after))
895 return false;
896 }
897
898 /* Check the chain_key of all lock chains. */
899 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
900 head = chainhash_table + i;
901 hlist_for_each_entry_rcu(chain, head, entry) {
902 if (!check_lock_chain_key(chain))
903 return false;
904 }
905 }
906
907 /*
908 * Check whether all list entries that are in use occur in a class
909 * lock list.
910 */
911 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
912 e = list_entries + i;
913 if (!in_any_class_list(&e->entry)) {
914 printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
915 (unsigned int)(e - list_entries),
916 e->class->name ? : "(?)",
917 e->links_to->name ? : "(?)");
918 return false;
919 }
920 }
921
922 /*
923 * Check whether all list entries that are not in use do not occur in
924 * a class lock list.
925 */
926 for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
927 e = list_entries + i;
928 if (in_any_class_list(&e->entry)) {
929 printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
930 (unsigned int)(e - list_entries),
931 e->class && e->class->name ? e->class->name :
932 "(?)",
933 e->links_to && e->links_to->name ?
934 e->links_to->name : "(?)");
935 return false;
936 }
937 }
938
939 return true;
940}
941
Ingo Molnard6d897c2006-07-10 04:44:04 -0700942/*
Bart Van Asschea0b0fd52019-02-14 15:00:46 -0800943 * Initialize the lock_classes[] array elements, the free_lock_classes list
944 * and also the delayed_free structure.
Bart Van Asschefeb0a382019-02-14 15:00:42 -0800945 */
946static void init_data_structures_once(void)
947{
948 static bool initialization_happened;
949 int i;
950
951 if (likely(initialization_happened))
952 return;
953
954 initialization_happened = true;
955
Bart Van Asschea0b0fd52019-02-14 15:00:46 -0800956 init_rcu_head(&delayed_free.rcu_head);
957 INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
958 INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
959
Bart Van Asschefeb0a382019-02-14 15:00:42 -0800960 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
Bart Van Asschea0b0fd52019-02-14 15:00:46 -0800961 list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
Bart Van Asschefeb0a382019-02-14 15:00:42 -0800962 INIT_LIST_HEAD(&lock_classes[i].locks_after);
963 INIT_LIST_HEAD(&lock_classes[i].locks_before);
964 }
965}
966
967/*
Ingo Molnard6d897c2006-07-10 04:44:04 -0700968 * Register a lock's class in the hash-table, if the class is not present
969 * yet. Otherwise we look it up. We cache the result in the lock object
970 * itself, so actual lookup of the hash should be once per lock object.
971 */
Denys Vlasenkoc003ed92016-04-08 20:58:46 +0200972static struct lock_class *
Peter Zijlstra4dfbb9d2006-10-11 01:45:14 -0400973register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
Ingo Molnard6d897c2006-07-10 04:44:04 -0700974{
975 struct lockdep_subclass_key *key;
Andrew Mortona63f38c2016-02-03 13:44:12 -0800976 struct hlist_head *hash_head;
Ingo Molnard6d897c2006-07-10 04:44:04 -0700977 struct lock_class *class;
Peter Zijlstra35a93932015-02-26 16:23:11 +0100978
979 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
Ingo Molnard6d897c2006-07-10 04:44:04 -0700980
981 class = look_up_lock_class(lock, subclass);
Matthew Wilcox64f29d12018-01-17 07:14:12 -0800982 if (likely(class))
Yong Zhang87cdee72011-11-09 16:07:14 +0800983 goto out_set_class_cache;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700984
Matthew Wilcox64f29d12018-01-17 07:14:12 -0800985 if (!lock->key) {
986 if (!assign_lock_key(lock))
987 return NULL;
988 } else if (!static_obj(lock->key)) {
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700989 return NULL;
990 }
991
Ingo Molnard6d897c2006-07-10 04:44:04 -0700992 key = lock->key->subkeys + subclass;
993 hash_head = classhashentry(key);
994
Ingo Molnar74c383f2006-12-13 00:34:43 -0800995 if (!graph_lock()) {
Ingo Molnar74c383f2006-12-13 00:34:43 -0800996 return NULL;
997 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700998 /*
999 * We have to do the hash-walk again, to avoid races
1000 * with another CPU:
1001 */
Andrew Mortona63f38c2016-02-03 13:44:12 -08001002 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001003 if (class->key == key)
1004 goto out_unlock_set;
Peter Zijlstra35a93932015-02-26 16:23:11 +01001005 }
1006
Bart Van Asschefeb0a382019-02-14 15:00:42 -08001007 init_data_structures_once();
1008
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08001009 /* Allocate a new lock class and add it to the hash. */
1010 class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1011 lock_entry);
1012 if (!class) {
Ingo Molnar74c383f2006-12-13 00:34:43 -08001013 if (!debug_locks_off_graph_unlock()) {
Ingo Molnar74c383f2006-12-13 00:34:43 -08001014 return NULL;
1015 }
Ingo Molnar74c383f2006-12-13 00:34:43 -08001016
Dave Jones2c522832013-04-25 13:40:02 -04001017 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
Peter Zijlstraeedeeab2009-03-18 12:38:47 +01001018 dump_stack();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001019 return NULL;
1020 }
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08001021 nr_lock_classes++;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02001022 debug_atomic_inc(nr_unused_locks);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001023 class->key = key;
1024 class->name = lock->name;
1025 class->subclass = subclass;
Bart Van Asschefeb0a382019-02-14 15:00:42 -08001026 WARN_ON_ONCE(!list_empty(&class->locks_before));
1027 WARN_ON_ONCE(!list_empty(&class->locks_after));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001028 class->name_version = count_matching_names(class);
1029 /*
1030 * We use RCU's safe list-add method to make
1031 * parallel walking of the hash-list safe:
1032 */
Andrew Mortona63f38c2016-02-03 13:44:12 -08001033 hlist_add_head_rcu(&class->hash_entry, hash_head);
Dale Farnsworth14811972008-02-25 23:03:02 +01001034 /*
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08001035 * Remove the class from the free list and add it to the global list
1036 * of classes.
Dale Farnsworth14811972008-02-25 23:03:02 +01001037 */
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08001038 list_move_tail(&class->lock_entry, &all_lock_classes);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001039
1040 if (verbose(class)) {
Ingo Molnar74c383f2006-12-13 00:34:43 -08001041 graph_unlock();
Ingo Molnar74c383f2006-12-13 00:34:43 -08001042
Borislav Petkov04860d42018-02-26 14:49:26 +01001043 printk("\nnew class %px: %s", class->key, class->name);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001044 if (class->name_version > 1)
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001045 printk(KERN_CONT "#%d", class->name_version);
1046 printk(KERN_CONT "\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001047 dump_stack();
Ingo Molnar74c383f2006-12-13 00:34:43 -08001048
Ingo Molnar74c383f2006-12-13 00:34:43 -08001049 if (!graph_lock()) {
Ingo Molnar74c383f2006-12-13 00:34:43 -08001050 return NULL;
1051 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001052 }
1053out_unlock_set:
Ingo Molnar74c383f2006-12-13 00:34:43 -08001054 graph_unlock();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001055
Yong Zhang87cdee72011-11-09 16:07:14 +08001056out_set_class_cache:
Peter Zijlstra4dfbb9d2006-10-11 01:45:14 -04001057 if (!subclass || force)
Hitoshi Mitake62016252010-10-05 18:01:51 +09001058 lock->class_cache[0] = class;
1059 else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1060 lock->class_cache[subclass] = class;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001061
Peter Zijlstra0119fee2011-09-02 01:30:29 +02001062 /*
1063 * Hash collision, did we smoke some? We found a class with a matching
1064 * hash but the subclass -- which is hashed in -- didn't match.
1065 */
Jarek Poplawski381a2292007-02-10 01:44:58 -08001066 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1067 return NULL;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001068
1069 return class;
1070}
1071
Peter Zijlstraca58abc2007-07-19 01:48:53 -07001072#ifdef CONFIG_PROVE_LOCKING
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07001073/*
Peter Zijlstra8e182572007-07-19 01:48:54 -07001074 * Allocate a lockdep entry. (assumes the graph_lock held, returns
1075 * with NULL on failure)
1076 */
1077static struct lock_list *alloc_list_entry(void)
1078{
Bart Van Asscheace35a72019-02-14 15:00:47 -08001079 int idx = find_first_zero_bit(list_entries_in_use,
1080 ARRAY_SIZE(list_entries));
1081
1082 if (idx >= ARRAY_SIZE(list_entries)) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07001083 if (!debug_locks_off_graph_unlock())
1084 return NULL;
1085
Dave Jones2c522832013-04-25 13:40:02 -04001086 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
Peter Zijlstraeedeeab2009-03-18 12:38:47 +01001087 dump_stack();
Peter Zijlstra8e182572007-07-19 01:48:54 -07001088 return NULL;
1089 }
Bart Van Asscheace35a72019-02-14 15:00:47 -08001090 nr_list_entries++;
1091 __set_bit(idx, list_entries_in_use);
1092 return list_entries + idx;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001093}
1094
1095/*
1096 * Add a new dependency to the head of the list:
1097 */
Bart Van Assche86cffb82019-02-14 15:00:41 -08001098static int add_lock_to_list(struct lock_class *this,
1099 struct lock_class *links_to, struct list_head *head,
Tahsin Erdogan83f06162016-11-08 00:02:07 -08001100 unsigned long ip, int distance,
1101 struct stack_trace *trace)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001102{
1103 struct lock_list *entry;
1104 /*
1105 * Lock not present yet - get a new dependency struct and
1106 * add it to the list:
1107 */
1108 entry = alloc_list_entry();
1109 if (!entry)
1110 return 0;
1111
Zhu Yi74870172008-08-27 14:33:00 +08001112 entry->class = this;
Bart Van Assche86cffb82019-02-14 15:00:41 -08001113 entry->links_to = links_to;
Zhu Yi74870172008-08-27 14:33:00 +08001114 entry->distance = distance;
Yong Zhang4726f2a2010-05-04 14:16:48 +08001115 entry->trace = *trace;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001116 /*
Peter Zijlstra35a93932015-02-26 16:23:11 +01001117 * Both allocation and removal are done under the graph lock; but
1118 * iteration is under RCU-sched; see look_up_lock_class() and
1119 * lockdep_free_key_range().
Peter Zijlstra8e182572007-07-19 01:48:54 -07001120 */
1121 list_add_tail_rcu(&entry->entry, head);
1122
1123 return 1;
1124}
1125
Peter Zijlstra98c33ed2009-07-21 13:19:07 +02001126/*
1127 * For good efficiency of modular, we use power of 2
1128 */
Peter Zijlstraaf012962009-07-16 15:44:29 +02001129#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
1130#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
1131
Peter Zijlstra98c33ed2009-07-21 13:19:07 +02001132/*
1133 * The circular_queue and helpers is used to implement the
Peter Zijlstraaf012962009-07-16 15:44:29 +02001134 * breadth-first search(BFS)algorithem, by which we can build
1135 * the shortest path from the next lock to be acquired to the
1136 * previous held lock if there is a circular between them.
Peter Zijlstra98c33ed2009-07-21 13:19:07 +02001137 */
Peter Zijlstraaf012962009-07-16 15:44:29 +02001138struct circular_queue {
1139 unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
1140 unsigned int front, rear;
1141};
1142
1143static struct circular_queue lock_cq;
Peter Zijlstraaf012962009-07-16 15:44:29 +02001144
Ming Lei12f3dfd2009-07-16 15:44:29 +02001145unsigned int max_bfs_queue_depth;
Peter Zijlstraaf012962009-07-16 15:44:29 +02001146
Ming Leie351b662009-07-22 22:48:09 +08001147static unsigned int lockdep_dependency_gen_id;
1148
Peter Zijlstraaf012962009-07-16 15:44:29 +02001149static inline void __cq_init(struct circular_queue *cq)
1150{
1151 cq->front = cq->rear = 0;
Ming Leie351b662009-07-22 22:48:09 +08001152 lockdep_dependency_gen_id++;
Peter Zijlstraaf012962009-07-16 15:44:29 +02001153}
1154
1155static inline int __cq_empty(struct circular_queue *cq)
1156{
1157 return (cq->front == cq->rear);
1158}
1159
1160static inline int __cq_full(struct circular_queue *cq)
1161{
1162 return ((cq->rear + 1) & CQ_MASK) == cq->front;
1163}
1164
1165static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
1166{
1167 if (__cq_full(cq))
1168 return -1;
1169
1170 cq->element[cq->rear] = elem;
1171 cq->rear = (cq->rear + 1) & CQ_MASK;
1172 return 0;
1173}
1174
1175static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
1176{
1177 if (__cq_empty(cq))
1178 return -1;
1179
1180 *elem = cq->element[cq->front];
1181 cq->front = (cq->front + 1) & CQ_MASK;
1182 return 0;
1183}
1184
1185static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
1186{
1187 return (cq->rear - cq->front) & CQ_MASK;
1188}
1189
1190static inline void mark_lock_accessed(struct lock_list *lock,
1191 struct lock_list *parent)
1192{
1193 unsigned long nr;
Peter Zijlstra98c33ed2009-07-21 13:19:07 +02001194
Peter Zijlstraaf012962009-07-16 15:44:29 +02001195 nr = lock - list_entries;
Bart Van Asscheace35a72019-02-14 15:00:47 -08001196 WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
Peter Zijlstraaf012962009-07-16 15:44:29 +02001197 lock->parent = parent;
Ming Leie351b662009-07-22 22:48:09 +08001198 lock->class->dep_gen_id = lockdep_dependency_gen_id;
Peter Zijlstraaf012962009-07-16 15:44:29 +02001199}
1200
1201static inline unsigned long lock_accessed(struct lock_list *lock)
1202{
1203 unsigned long nr;
Peter Zijlstra98c33ed2009-07-21 13:19:07 +02001204
Peter Zijlstraaf012962009-07-16 15:44:29 +02001205 nr = lock - list_entries;
Bart Van Asscheace35a72019-02-14 15:00:47 -08001206 WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
Ming Leie351b662009-07-22 22:48:09 +08001207 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
Peter Zijlstraaf012962009-07-16 15:44:29 +02001208}
1209
1210static inline struct lock_list *get_lock_parent(struct lock_list *child)
1211{
1212 return child->parent;
1213}
1214
1215static inline int get_lock_depth(struct lock_list *child)
1216{
1217 int depth = 0;
1218 struct lock_list *parent;
1219
1220 while ((parent = get_lock_parent(child))) {
1221 child = parent;
1222 depth++;
1223 }
1224 return depth;
1225}
1226
Ming Lei9e2d5512009-07-16 15:44:29 +02001227static int __bfs(struct lock_list *source_entry,
Peter Zijlstraaf012962009-07-16 15:44:29 +02001228 void *data,
1229 int (*match)(struct lock_list *entry, void *data),
1230 struct lock_list **target_entry,
1231 int forward)
Ming Leic94aa5c2009-07-16 15:44:29 +02001232{
1233 struct lock_list *entry;
Ming Leid588e462009-07-16 15:44:29 +02001234 struct list_head *head;
Ming Leic94aa5c2009-07-16 15:44:29 +02001235 struct circular_queue *cq = &lock_cq;
1236 int ret = 1;
1237
Ming Lei9e2d5512009-07-16 15:44:29 +02001238 if (match(source_entry, data)) {
Ming Leic94aa5c2009-07-16 15:44:29 +02001239 *target_entry = source_entry;
1240 ret = 0;
1241 goto exit;
1242 }
1243
Ming Leid588e462009-07-16 15:44:29 +02001244 if (forward)
1245 head = &source_entry->class->locks_after;
1246 else
1247 head = &source_entry->class->locks_before;
1248
1249 if (list_empty(head))
1250 goto exit;
1251
1252 __cq_init(cq);
Ming Leic94aa5c2009-07-16 15:44:29 +02001253 __cq_enqueue(cq, (unsigned long)source_entry);
1254
1255 while (!__cq_empty(cq)) {
1256 struct lock_list *lock;
Ming Leic94aa5c2009-07-16 15:44:29 +02001257
1258 __cq_dequeue(cq, (unsigned long *)&lock);
1259
1260 if (!lock->class) {
1261 ret = -2;
1262 goto exit;
1263 }
1264
1265 if (forward)
1266 head = &lock->class->locks_after;
1267 else
1268 head = &lock->class->locks_before;
1269
Peter Zijlstra35a93932015-02-26 16:23:11 +01001270 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1271
1272 list_for_each_entry_rcu(entry, head, entry) {
Ming Leic94aa5c2009-07-16 15:44:29 +02001273 if (!lock_accessed(entry)) {
Ming Lei12f3dfd2009-07-16 15:44:29 +02001274 unsigned int cq_depth;
Ming Leic94aa5c2009-07-16 15:44:29 +02001275 mark_lock_accessed(entry, lock);
Ming Lei9e2d5512009-07-16 15:44:29 +02001276 if (match(entry, data)) {
Ming Leic94aa5c2009-07-16 15:44:29 +02001277 *target_entry = entry;
1278 ret = 0;
1279 goto exit;
1280 }
1281
1282 if (__cq_enqueue(cq, (unsigned long)entry)) {
1283 ret = -1;
1284 goto exit;
1285 }
Ming Lei12f3dfd2009-07-16 15:44:29 +02001286 cq_depth = __cq_get_elem_count(cq);
1287 if (max_bfs_queue_depth < cq_depth)
1288 max_bfs_queue_depth = cq_depth;
Ming Leic94aa5c2009-07-16 15:44:29 +02001289 }
1290 }
1291 }
1292exit:
1293 return ret;
1294}
1295
Ming Leid7aaba12009-07-16 15:44:29 +02001296static inline int __bfs_forwards(struct lock_list *src_entry,
Ming Lei9e2d5512009-07-16 15:44:29 +02001297 void *data,
1298 int (*match)(struct lock_list *entry, void *data),
1299 struct lock_list **target_entry)
Ming Leic94aa5c2009-07-16 15:44:29 +02001300{
Ming Lei9e2d5512009-07-16 15:44:29 +02001301 return __bfs(src_entry, data, match, target_entry, 1);
Ming Leic94aa5c2009-07-16 15:44:29 +02001302
1303}
1304
Ming Leid7aaba12009-07-16 15:44:29 +02001305static inline int __bfs_backwards(struct lock_list *src_entry,
Ming Lei9e2d5512009-07-16 15:44:29 +02001306 void *data,
1307 int (*match)(struct lock_list *entry, void *data),
1308 struct lock_list **target_entry)
Ming Leic94aa5c2009-07-16 15:44:29 +02001309{
Ming Lei9e2d5512009-07-16 15:44:29 +02001310 return __bfs(src_entry, data, match, target_entry, 0);
Ming Leic94aa5c2009-07-16 15:44:29 +02001311
1312}
1313
Peter Zijlstra8e182572007-07-19 01:48:54 -07001314/*
1315 * Recursive, forwards-direction lock-dependency checking, used for
1316 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1317 * checking.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001318 */
Peter Zijlstra8e182572007-07-19 01:48:54 -07001319
1320/*
1321 * Print a dependency chain entry (this is only done when a deadlock
1322 * has been detected):
1323 */
1324static noinline int
Ming Lei24208ca2009-07-16 15:44:29 +02001325print_circular_bug_entry(struct lock_list *target, int depth)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001326{
1327 if (debug_locks_silent)
1328 return 0;
1329 printk("\n-> #%u", depth);
1330 print_lock_name(target->class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001331 printk(KERN_CONT ":\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07001332 print_stack_trace(&target->trace, 6);
1333
1334 return 0;
1335}
1336
Steven Rostedtf4185812011-04-20 21:41:55 -04001337static void
1338print_circular_lock_scenario(struct held_lock *src,
1339 struct held_lock *tgt,
1340 struct lock_list *prt)
1341{
1342 struct lock_class *source = hlock_class(src);
1343 struct lock_class *target = hlock_class(tgt);
1344 struct lock_class *parent = prt->class;
1345
1346 /*
1347 * A direct locking problem where unsafe_class lock is taken
1348 * directly by safe_class lock, then all we need to show
1349 * is the deadlock scenario, as it is obvious that the
1350 * unsafe lock is taken under the safe lock.
1351 *
1352 * But if there is a chain instead, where the safe lock takes
1353 * an intermediate lock (middle_class) where this lock is
1354 * not the same as the safe lock, then the lock chain is
1355 * used to describe the problem. Otherwise we would need
1356 * to show a different CPU case for each link in the chain
1357 * from the safe_class lock to the unsafe_class lock.
1358 */
1359 if (parent != source) {
1360 printk("Chain exists of:\n ");
1361 __print_lock_name(source);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001362 printk(KERN_CONT " --> ");
Steven Rostedtf4185812011-04-20 21:41:55 -04001363 __print_lock_name(parent);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001364 printk(KERN_CONT " --> ");
Steven Rostedtf4185812011-04-20 21:41:55 -04001365 __print_lock_name(target);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001366 printk(KERN_CONT "\n\n");
Steven Rostedtf4185812011-04-20 21:41:55 -04001367 }
1368
Ingo Molnare966eae2017-12-12 12:31:16 +01001369 printk(" Possible unsafe locking scenario:\n\n");
1370 printk(" CPU0 CPU1\n");
1371 printk(" ---- ----\n");
1372 printk(" lock(");
1373 __print_lock_name(target);
1374 printk(KERN_CONT ");\n");
1375 printk(" lock(");
1376 __print_lock_name(parent);
1377 printk(KERN_CONT ");\n");
1378 printk(" lock(");
1379 __print_lock_name(target);
1380 printk(KERN_CONT ");\n");
1381 printk(" lock(");
1382 __print_lock_name(source);
1383 printk(KERN_CONT ");\n");
1384 printk("\n *** DEADLOCK ***\n\n");
Steven Rostedtf4185812011-04-20 21:41:55 -04001385}
1386
Peter Zijlstra8e182572007-07-19 01:48:54 -07001387/*
1388 * When a circular dependency is detected, print the
1389 * header first:
1390 */
1391static noinline int
Ming Leidb0002a2009-07-16 15:44:29 +02001392print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1393 struct held_lock *check_src,
1394 struct held_lock *check_tgt)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001395{
1396 struct task_struct *curr = current;
1397
Ming Leic94aa5c2009-07-16 15:44:29 +02001398 if (debug_locks_silent)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001399 return 0;
1400
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001401 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08001402 pr_warn("======================================================\n");
1403 pr_warn("WARNING: possible circular locking dependency detected\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01001404 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08001405 pr_warn("------------------------------------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001406 pr_warn("%s/%d is trying to acquire lock:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001407 curr->comm, task_pid_nr(curr));
Ming Leidb0002a2009-07-16 15:44:29 +02001408 print_lock(check_src);
Byungchul Park383a4bc2017-08-07 16:12:55 +09001409
Ingo Molnare966eae2017-12-12 12:31:16 +01001410 pr_warn("\nbut task is already holding lock:\n");
Byungchul Park383a4bc2017-08-07 16:12:55 +09001411
Ming Leidb0002a2009-07-16 15:44:29 +02001412 print_lock(check_tgt);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001413 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1414 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07001415
1416 print_circular_bug_entry(entry, depth);
1417
1418 return 0;
1419}
1420
Ming Lei9e2d5512009-07-16 15:44:29 +02001421static inline int class_equal(struct lock_list *entry, void *data)
1422{
1423 return entry->class == data;
1424}
1425
Ming Leidb0002a2009-07-16 15:44:29 +02001426static noinline int print_circular_bug(struct lock_list *this,
1427 struct lock_list *target,
1428 struct held_lock *check_src,
Byungchul Park383a4bc2017-08-07 16:12:55 +09001429 struct held_lock *check_tgt,
1430 struct stack_trace *trace)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001431{
1432 struct task_struct *curr = current;
Ming Leic94aa5c2009-07-16 15:44:29 +02001433 struct lock_list *parent;
Steven Rostedtf4185812011-04-20 21:41:55 -04001434 struct lock_list *first_parent;
Ming Lei24208ca2009-07-16 15:44:29 +02001435 int depth;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001436
Ming Leic94aa5c2009-07-16 15:44:29 +02001437 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001438 return 0;
1439
Ingo Molnare966eae2017-12-12 12:31:16 +01001440 if (!save_trace(&this->trace))
Peter Zijlstra8e182572007-07-19 01:48:54 -07001441 return 0;
1442
Ming Leic94aa5c2009-07-16 15:44:29 +02001443 depth = get_lock_depth(target);
1444
Ming Leidb0002a2009-07-16 15:44:29 +02001445 print_circular_bug_header(target, depth, check_src, check_tgt);
Ming Leic94aa5c2009-07-16 15:44:29 +02001446
1447 parent = get_lock_parent(target);
Steven Rostedtf4185812011-04-20 21:41:55 -04001448 first_parent = parent;
Ming Leic94aa5c2009-07-16 15:44:29 +02001449
1450 while (parent) {
1451 print_circular_bug_entry(parent, --depth);
1452 parent = get_lock_parent(parent);
1453 }
Peter Zijlstra8e182572007-07-19 01:48:54 -07001454
1455 printk("\nother info that might help us debug this:\n\n");
Steven Rostedtf4185812011-04-20 21:41:55 -04001456 print_circular_lock_scenario(check_src, check_tgt,
1457 first_parent);
1458
Peter Zijlstra8e182572007-07-19 01:48:54 -07001459 lockdep_print_held_locks(curr);
1460
1461 printk("\nstack backtrace:\n");
1462 dump_stack();
1463
1464 return 0;
1465}
1466
Ming Leidb0002a2009-07-16 15:44:29 +02001467static noinline int print_bfs_bug(int ret)
1468{
1469 if (!debug_locks_off_graph_unlock())
1470 return 0;
1471
Peter Zijlstra0119fee2011-09-02 01:30:29 +02001472 /*
1473 * Breadth-first-search failed, graph got corrupted?
1474 */
Ming Leidb0002a2009-07-16 15:44:29 +02001475 WARN(1, "lockdep bfs error:%d\n", ret);
1476
1477 return 0;
1478}
1479
Ming Leief681022009-07-16 15:44:29 +02001480static int noop_count(struct lock_list *entry, void *data)
David Miller419ca3f2008-07-29 21:45:03 -07001481{
Ming Leief681022009-07-16 15:44:29 +02001482 (*(unsigned long *)data)++;
1483 return 0;
David Miller419ca3f2008-07-29 21:45:03 -07001484}
1485
Fengguang Wu5216d532013-11-09 00:55:35 +08001486static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
Ming Leief681022009-07-16 15:44:29 +02001487{
1488 unsigned long count = 0;
1489 struct lock_list *uninitialized_var(target_entry);
1490
1491 __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1492
1493 return count;
1494}
David Miller419ca3f2008-07-29 21:45:03 -07001495unsigned long lockdep_count_forward_deps(struct lock_class *class)
1496{
1497 unsigned long ret, flags;
Ming Leief681022009-07-16 15:44:29 +02001498 struct lock_list this;
1499
1500 this.parent = NULL;
1501 this.class = class;
David Miller419ca3f2008-07-29 21:45:03 -07001502
Steven Rostedt (VMware)fcc784b2018-04-04 14:06:30 -04001503 raw_local_irq_save(flags);
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001504 arch_spin_lock(&lockdep_lock);
Ming Leief681022009-07-16 15:44:29 +02001505 ret = __lockdep_count_forward_deps(&this);
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001506 arch_spin_unlock(&lockdep_lock);
Steven Rostedt (VMware)fcc784b2018-04-04 14:06:30 -04001507 raw_local_irq_restore(flags);
David Miller419ca3f2008-07-29 21:45:03 -07001508
1509 return ret;
1510}
1511
Fengguang Wu5216d532013-11-09 00:55:35 +08001512static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
David Miller419ca3f2008-07-29 21:45:03 -07001513{
Ming Leief681022009-07-16 15:44:29 +02001514 unsigned long count = 0;
1515 struct lock_list *uninitialized_var(target_entry);
David Miller419ca3f2008-07-29 21:45:03 -07001516
Ming Leief681022009-07-16 15:44:29 +02001517 __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
David Miller419ca3f2008-07-29 21:45:03 -07001518
Ming Leief681022009-07-16 15:44:29 +02001519 return count;
David Miller419ca3f2008-07-29 21:45:03 -07001520}
1521
1522unsigned long lockdep_count_backward_deps(struct lock_class *class)
1523{
1524 unsigned long ret, flags;
Ming Leief681022009-07-16 15:44:29 +02001525 struct lock_list this;
1526
1527 this.parent = NULL;
1528 this.class = class;
David Miller419ca3f2008-07-29 21:45:03 -07001529
Steven Rostedt (VMware)fcc784b2018-04-04 14:06:30 -04001530 raw_local_irq_save(flags);
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001531 arch_spin_lock(&lockdep_lock);
Ming Leief681022009-07-16 15:44:29 +02001532 ret = __lockdep_count_backward_deps(&this);
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001533 arch_spin_unlock(&lockdep_lock);
Steven Rostedt (VMware)fcc784b2018-04-04 14:06:30 -04001534 raw_local_irq_restore(flags);
David Miller419ca3f2008-07-29 21:45:03 -07001535
1536 return ret;
1537}
1538
Peter Zijlstra8e182572007-07-19 01:48:54 -07001539/*
1540 * Prove that the dependency graph starting at <entry> can not
1541 * lead to <target>. Print an error and return 0 if it does.
1542 */
1543static noinline int
Ming Leidb0002a2009-07-16 15:44:29 +02001544check_noncircular(struct lock_list *root, struct lock_class *target,
1545 struct lock_list **target_entry)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001546{
Ming Leidb0002a2009-07-16 15:44:29 +02001547 int result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001548
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02001549 debug_atomic_inc(nr_cyclic_checks);
David Miller419ca3f2008-07-29 21:45:03 -07001550
Ming Leid7aaba12009-07-16 15:44:29 +02001551 result = __bfs_forwards(root, target, class_equal, target_entry);
Ming Leidb0002a2009-07-16 15:44:29 +02001552
1553 return result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001554}
1555
Peter Zijlstraae813302017-03-03 10:13:38 +01001556static noinline int
1557check_redundant(struct lock_list *root, struct lock_class *target,
1558 struct lock_list **target_entry)
1559{
1560 int result;
1561
1562 debug_atomic_inc(nr_redundant_checks);
1563
1564 result = __bfs_forwards(root, target, class_equal, target_entry);
1565
1566 return result;
1567}
1568
Steven Rostedt81d68a92008-05-12 21:20:42 +02001569#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001570/*
1571 * Forwards and backwards subgraph searching, for the purposes of
1572 * proving that two subgraphs can be connected by a new dependency
1573 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1574 */
Peter Zijlstra8e182572007-07-19 01:48:54 -07001575
Ming Leid7aaba12009-07-16 15:44:29 +02001576static inline int usage_match(struct lock_list *entry, void *bit)
1577{
1578 return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1579}
1580
1581
1582
Peter Zijlstra8e182572007-07-19 01:48:54 -07001583/*
1584 * Find a node in the forwards-direction dependency sub-graph starting
Ming Leid7aaba12009-07-16 15:44:29 +02001585 * at @root->class that matches @bit.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001586 *
Ming Leid7aaba12009-07-16 15:44:29 +02001587 * Return 0 if such a node exists in the subgraph, and put that node
1588 * into *@target_entry.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001589 *
Ming Leid7aaba12009-07-16 15:44:29 +02001590 * Return 1 otherwise and keep *@target_entry unchanged.
1591 * Return <0 on error.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001592 */
Ming Leid7aaba12009-07-16 15:44:29 +02001593static int
1594find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1595 struct lock_list **target_entry)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001596{
Ming Leid7aaba12009-07-16 15:44:29 +02001597 int result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001598
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02001599 debug_atomic_inc(nr_find_usage_forwards_checks);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001600
Ming Leid7aaba12009-07-16 15:44:29 +02001601 result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1602
1603 return result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001604}
1605
1606/*
1607 * Find a node in the backwards-direction dependency sub-graph starting
Ming Leid7aaba12009-07-16 15:44:29 +02001608 * at @root->class that matches @bit.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001609 *
Ming Leid7aaba12009-07-16 15:44:29 +02001610 * Return 0 if such a node exists in the subgraph, and put that node
1611 * into *@target_entry.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001612 *
Ming Leid7aaba12009-07-16 15:44:29 +02001613 * Return 1 otherwise and keep *@target_entry unchanged.
1614 * Return <0 on error.
Peter Zijlstra8e182572007-07-19 01:48:54 -07001615 */
Ming Leid7aaba12009-07-16 15:44:29 +02001616static int
1617find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1618 struct lock_list **target_entry)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001619{
Ming Leid7aaba12009-07-16 15:44:29 +02001620 int result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001621
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02001622 debug_atomic_inc(nr_find_usage_backwards_checks);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001623
Ming Leid7aaba12009-07-16 15:44:29 +02001624 result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
Dave Jonesf82b2172008-08-11 09:30:23 +02001625
Ming Leid7aaba12009-07-16 15:44:29 +02001626 return result;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001627}
1628
Peter Zijlstraaf012962009-07-16 15:44:29 +02001629static void print_lock_class_header(struct lock_class *class, int depth)
1630{
1631 int bit;
1632
1633 printk("%*s->", depth, "");
1634 print_lock_name(class);
Waiman Long8ca2b56c2018-10-03 13:07:18 -04001635#ifdef CONFIG_DEBUG_LOCKDEP
1636 printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
1637#endif
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001638 printk(KERN_CONT " {\n");
Peter Zijlstraaf012962009-07-16 15:44:29 +02001639
1640 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1641 if (class->usage_mask & (1 << bit)) {
1642 int len = depth;
1643
1644 len += printk("%*s %s", depth, "", usage_str[bit]);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001645 len += printk(KERN_CONT " at:\n");
Peter Zijlstraaf012962009-07-16 15:44:29 +02001646 print_stack_trace(class->usage_traces + bit, len);
1647 }
1648 }
1649 printk("%*s }\n", depth, "");
1650
Borislav Petkov04860d42018-02-26 14:49:26 +01001651 printk("%*s ... key at: [<%px>] %pS\n",
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001652 depth, "", class->key, class->key);
Peter Zijlstraaf012962009-07-16 15:44:29 +02001653}
1654
1655/*
1656 * printk the shortest lock dependencies from @start to @end in reverse order:
1657 */
1658static void __used
1659print_shortest_lock_dependencies(struct lock_list *leaf,
1660 struct lock_list *root)
1661{
1662 struct lock_list *entry = leaf;
1663 int depth;
1664
1665 /*compute depth from generated tree by BFS*/
1666 depth = get_lock_depth(leaf);
1667
1668 do {
1669 print_lock_class_header(entry->class, depth);
1670 printk("%*s ... acquired at:\n", depth, "");
1671 print_stack_trace(&entry->trace, 2);
1672 printk("\n");
1673
1674 if (depth == 0 && (entry != root)) {
Steven Rostedt6be8c392011-04-20 21:41:58 -04001675 printk("lockdep:%s bad path found in chain graph\n", __func__);
Peter Zijlstraaf012962009-07-16 15:44:29 +02001676 break;
1677 }
1678
1679 entry = get_lock_parent(entry);
1680 depth--;
1681 } while (entry && (depth >= 0));
1682
1683 return;
1684}
Ming Leid7aaba12009-07-16 15:44:29 +02001685
Steven Rostedt3003eba2011-04-20 21:41:54 -04001686static void
1687print_irq_lock_scenario(struct lock_list *safe_entry,
1688 struct lock_list *unsafe_entry,
Steven Rostedtdad3d742011-04-20 21:41:57 -04001689 struct lock_class *prev_class,
1690 struct lock_class *next_class)
Steven Rostedt3003eba2011-04-20 21:41:54 -04001691{
1692 struct lock_class *safe_class = safe_entry->class;
1693 struct lock_class *unsafe_class = unsafe_entry->class;
Steven Rostedtdad3d742011-04-20 21:41:57 -04001694 struct lock_class *middle_class = prev_class;
Steven Rostedt3003eba2011-04-20 21:41:54 -04001695
1696 if (middle_class == safe_class)
Steven Rostedtdad3d742011-04-20 21:41:57 -04001697 middle_class = next_class;
Steven Rostedt3003eba2011-04-20 21:41:54 -04001698
1699 /*
1700 * A direct locking problem where unsafe_class lock is taken
1701 * directly by safe_class lock, then all we need to show
1702 * is the deadlock scenario, as it is obvious that the
1703 * unsafe lock is taken under the safe lock.
1704 *
1705 * But if there is a chain instead, where the safe lock takes
1706 * an intermediate lock (middle_class) where this lock is
1707 * not the same as the safe lock, then the lock chain is
1708 * used to describe the problem. Otherwise we would need
1709 * to show a different CPU case for each link in the chain
1710 * from the safe_class lock to the unsafe_class lock.
1711 */
1712 if (middle_class != unsafe_class) {
1713 printk("Chain exists of:\n ");
1714 __print_lock_name(safe_class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001715 printk(KERN_CONT " --> ");
Steven Rostedt3003eba2011-04-20 21:41:54 -04001716 __print_lock_name(middle_class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001717 printk(KERN_CONT " --> ");
Steven Rostedt3003eba2011-04-20 21:41:54 -04001718 __print_lock_name(unsafe_class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001719 printk(KERN_CONT "\n\n");
Steven Rostedt3003eba2011-04-20 21:41:54 -04001720 }
1721
1722 printk(" Possible interrupt unsafe locking scenario:\n\n");
1723 printk(" CPU0 CPU1\n");
1724 printk(" ---- ----\n");
1725 printk(" lock(");
1726 __print_lock_name(unsafe_class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001727 printk(KERN_CONT ");\n");
Steven Rostedt3003eba2011-04-20 21:41:54 -04001728 printk(" local_irq_disable();\n");
1729 printk(" lock(");
1730 __print_lock_name(safe_class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001731 printk(KERN_CONT ");\n");
Steven Rostedt3003eba2011-04-20 21:41:54 -04001732 printk(" lock(");
1733 __print_lock_name(middle_class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001734 printk(KERN_CONT ");\n");
Steven Rostedt3003eba2011-04-20 21:41:54 -04001735 printk(" <Interrupt>\n");
1736 printk(" lock(");
1737 __print_lock_name(safe_class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001738 printk(KERN_CONT ");\n");
Steven Rostedt3003eba2011-04-20 21:41:54 -04001739 printk("\n *** DEADLOCK ***\n\n");
1740}
1741
Peter Zijlstra8e182572007-07-19 01:48:54 -07001742static int
1743print_bad_irq_dependency(struct task_struct *curr,
Ming Lei24208ca2009-07-16 15:44:29 +02001744 struct lock_list *prev_root,
1745 struct lock_list *next_root,
1746 struct lock_list *backwards_entry,
1747 struct lock_list *forwards_entry,
Peter Zijlstra8e182572007-07-19 01:48:54 -07001748 struct held_lock *prev,
1749 struct held_lock *next,
1750 enum lock_usage_bit bit1,
1751 enum lock_usage_bit bit2,
1752 const char *irqclass)
1753{
1754 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1755 return 0;
1756
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001757 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08001758 pr_warn("=====================================================\n");
1759 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
Peter Zijlstra8e182572007-07-19 01:48:54 -07001760 irqclass, irqclass);
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01001761 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08001762 pr_warn("-----------------------------------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001763 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001764 curr->comm, task_pid_nr(curr),
Peter Zijlstra8e182572007-07-19 01:48:54 -07001765 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1766 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1767 curr->hardirqs_enabled,
1768 curr->softirqs_enabled);
1769 print_lock(next);
1770
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001771 pr_warn("\nand this task is already holding:\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07001772 print_lock(prev);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001773 pr_warn("which would create a new lock dependency:\n");
Dave Jonesf82b2172008-08-11 09:30:23 +02001774 print_lock_name(hlock_class(prev));
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001775 pr_cont(" ->");
Dave Jonesf82b2172008-08-11 09:30:23 +02001776 print_lock_name(hlock_class(next));
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001777 pr_cont("\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07001778
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001779 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
Peter Zijlstra8e182572007-07-19 01:48:54 -07001780 irqclass);
Ming Lei24208ca2009-07-16 15:44:29 +02001781 print_lock_name(backwards_entry->class);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001782 pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001783
Ming Lei24208ca2009-07-16 15:44:29 +02001784 print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001785
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001786 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
Ming Lei24208ca2009-07-16 15:44:29 +02001787 print_lock_name(forwards_entry->class);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001788 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1789 pr_warn("...");
Peter Zijlstra8e182572007-07-19 01:48:54 -07001790
Ming Lei24208ca2009-07-16 15:44:29 +02001791 print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001792
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001793 pr_warn("\nother info that might help us debug this:\n\n");
Steven Rostedtdad3d742011-04-20 21:41:57 -04001794 print_irq_lock_scenario(backwards_entry, forwards_entry,
1795 hlock_class(prev), hlock_class(next));
Steven Rostedt3003eba2011-04-20 21:41:54 -04001796
Peter Zijlstra8e182572007-07-19 01:48:54 -07001797 lockdep_print_held_locks(curr);
1798
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001799 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
Ming Lei24208ca2009-07-16 15:44:29 +02001800 if (!save_trace(&prev_root->trace))
1801 return 0;
1802 print_shortest_lock_dependencies(backwards_entry, prev_root);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001803
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001804 pr_warn("\nthe dependencies between the lock to be acquired");
1805 pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
Ming Lei24208ca2009-07-16 15:44:29 +02001806 if (!save_trace(&next_root->trace))
1807 return 0;
1808 print_shortest_lock_dependencies(forwards_entry, next_root);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001809
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001810 pr_warn("\nstack backtrace:\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07001811 dump_stack();
1812
1813 return 0;
1814}
1815
1816static int
1817check_usage(struct task_struct *curr, struct held_lock *prev,
1818 struct held_lock *next, enum lock_usage_bit bit_backwards,
1819 enum lock_usage_bit bit_forwards, const char *irqclass)
1820{
1821 int ret;
Ming Lei24208ca2009-07-16 15:44:29 +02001822 struct lock_list this, that;
Ming Leid7aaba12009-07-16 15:44:29 +02001823 struct lock_list *uninitialized_var(target_entry);
Ming Lei24208ca2009-07-16 15:44:29 +02001824 struct lock_list *uninitialized_var(target_entry1);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001825
Ming Leid7aaba12009-07-16 15:44:29 +02001826 this.parent = NULL;
Peter Zijlstra8e182572007-07-19 01:48:54 -07001827
Ming Leid7aaba12009-07-16 15:44:29 +02001828 this.class = hlock_class(prev);
1829 ret = find_usage_backwards(&this, bit_backwards, &target_entry);
Peter Zijlstraaf012962009-07-16 15:44:29 +02001830 if (ret < 0)
1831 return print_bfs_bug(ret);
1832 if (ret == 1)
1833 return ret;
Ming Leid7aaba12009-07-16 15:44:29 +02001834
Ming Lei24208ca2009-07-16 15:44:29 +02001835 that.parent = NULL;
1836 that.class = hlock_class(next);
1837 ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
Peter Zijlstraaf012962009-07-16 15:44:29 +02001838 if (ret < 0)
1839 return print_bfs_bug(ret);
1840 if (ret == 1)
1841 return ret;
Ming Leid7aaba12009-07-16 15:44:29 +02001842
Ming Lei24208ca2009-07-16 15:44:29 +02001843 return print_bad_irq_dependency(curr, &this, &that,
1844 target_entry, target_entry1,
1845 prev, next,
Peter Zijlstra8e182572007-07-19 01:48:54 -07001846 bit_backwards, bit_forwards, irqclass);
1847}
1848
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001849static const char *state_names[] = {
1850#define LOCKDEP_STATE(__STATE) \
Peter Zijlstrab4b136f2009-01-29 14:50:36 +01001851 __stringify(__STATE),
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001852#include "lockdep_states.h"
1853#undef LOCKDEP_STATE
1854};
1855
1856static const char *state_rnames[] = {
1857#define LOCKDEP_STATE(__STATE) \
Peter Zijlstrab4b136f2009-01-29 14:50:36 +01001858 __stringify(__STATE)"-READ",
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001859#include "lockdep_states.h"
1860#undef LOCKDEP_STATE
1861};
1862
1863static inline const char *state_name(enum lock_usage_bit bit)
1864{
Frederic Weisbeckerbba2a8f2018-12-28 06:02:01 +01001865 return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001866}
1867
1868static int exclusive_bit(int new_bit)
1869{
Frederic Weisbeckerbba2a8f2018-12-28 06:02:01 +01001870 int state = new_bit & LOCK_USAGE_STATE_MASK;
1871 int dir = new_bit & LOCK_USAGE_DIR_MASK;
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001872
1873 /*
1874 * keep state, bit flip the direction and strip read.
1875 */
Frederic Weisbeckerbba2a8f2018-12-28 06:02:01 +01001876 return state | (dir ^ LOCK_USAGE_DIR_MASK);
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001877}
1878
1879static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1880 struct held_lock *next, enum lock_usage_bit bit)
Peter Zijlstra8e182572007-07-19 01:48:54 -07001881{
1882 /*
1883 * Prove that the new dependency does not connect a hardirq-safe
1884 * lock with a hardirq-unsafe lock - to achieve this we search
1885 * the backwards-subgraph starting at <prev>, and the
1886 * forwards-subgraph starting at <next>:
1887 */
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001888 if (!check_usage(curr, prev, next, bit,
1889 exclusive_bit(bit), state_name(bit)))
Peter Zijlstra8e182572007-07-19 01:48:54 -07001890 return 0;
1891
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001892 bit++; /* _READ */
1893
Peter Zijlstra8e182572007-07-19 01:48:54 -07001894 /*
1895 * Prove that the new dependency does not connect a hardirq-safe-read
1896 * lock with a hardirq-unsafe lock - to achieve this we search
1897 * the backwards-subgraph starting at <prev>, and the
1898 * forwards-subgraph starting at <next>:
1899 */
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001900 if (!check_usage(curr, prev, next, bit,
1901 exclusive_bit(bit), state_name(bit)))
Peter Zijlstra8e182572007-07-19 01:48:54 -07001902 return 0;
1903
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001904 return 1;
1905}
Peter Zijlstra8e182572007-07-19 01:48:54 -07001906
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001907static int
1908check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1909 struct held_lock *next)
1910{
1911#define LOCKDEP_STATE(__STATE) \
1912 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
Nick Piggincf40bd12009-01-21 08:12:39 +01001913 return 0;
Peter Zijlstra4f367d8a2009-01-22 18:10:42 +01001914#include "lockdep_states.h"
1915#undef LOCKDEP_STATE
Nick Piggincf40bd12009-01-21 08:12:39 +01001916
Peter Zijlstra8e182572007-07-19 01:48:54 -07001917 return 1;
1918}
1919
1920static void inc_chains(void)
1921{
1922 if (current->hardirq_context)
1923 nr_hardirq_chains++;
1924 else {
1925 if (current->softirq_context)
1926 nr_softirq_chains++;
1927 else
1928 nr_process_chains++;
1929 }
1930}
1931
1932#else
1933
1934static inline int
1935check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1936 struct held_lock *next)
1937{
1938 return 1;
1939}
1940
1941static inline void inc_chains(void)
1942{
1943 nr_process_chains++;
1944}
1945
1946#endif
1947
Steven Rostedt48702ec2011-04-20 21:41:56 -04001948static void
1949print_deadlock_scenario(struct held_lock *nxt,
1950 struct held_lock *prv)
1951{
1952 struct lock_class *next = hlock_class(nxt);
1953 struct lock_class *prev = hlock_class(prv);
1954
1955 printk(" Possible unsafe locking scenario:\n\n");
1956 printk(" CPU0\n");
1957 printk(" ----\n");
1958 printk(" lock(");
1959 __print_lock_name(prev);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001960 printk(KERN_CONT ");\n");
Steven Rostedt48702ec2011-04-20 21:41:56 -04001961 printk(" lock(");
1962 __print_lock_name(next);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01001963 printk(KERN_CONT ");\n");
Steven Rostedt48702ec2011-04-20 21:41:56 -04001964 printk("\n *** DEADLOCK ***\n\n");
1965 printk(" May be due to missing lock nesting notation\n\n");
1966}
1967
Peter Zijlstra8e182572007-07-19 01:48:54 -07001968static int
1969print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1970 struct held_lock *next)
1971{
1972 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1973 return 0;
1974
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001975 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08001976 pr_warn("============================================\n");
1977 pr_warn("WARNING: possible recursive locking detected\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01001978 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08001979 pr_warn("--------------------------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001980 pr_warn("%s/%d is trying to acquire lock:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001981 curr->comm, task_pid_nr(curr));
Peter Zijlstra8e182572007-07-19 01:48:54 -07001982 print_lock(next);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001983 pr_warn("\nbut task is already holding lock:\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07001984 print_lock(prev);
1985
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001986 pr_warn("\nother info that might help us debug this:\n");
Steven Rostedt48702ec2011-04-20 21:41:56 -04001987 print_deadlock_scenario(next, prev);
Peter Zijlstra8e182572007-07-19 01:48:54 -07001988 lockdep_print_held_locks(curr);
1989
Paul E. McKenney681fbec2017-05-04 15:44:38 -07001990 pr_warn("\nstack backtrace:\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07001991 dump_stack();
1992
1993 return 0;
1994}
1995
1996/*
1997 * Check whether we are holding such a class already.
1998 *
1999 * (Note that this has to be done separately, because the graph cannot
2000 * detect such classes of deadlocks.)
2001 *
2002 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
2003 */
2004static int
2005check_deadlock(struct task_struct *curr, struct held_lock *next,
2006 struct lockdep_map *next_instance, int read)
2007{
2008 struct held_lock *prev;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02002009 struct held_lock *nest = NULL;
Peter Zijlstra8e182572007-07-19 01:48:54 -07002010 int i;
2011
2012 for (i = 0; i < curr->lockdep_depth; i++) {
2013 prev = curr->held_locks + i;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02002014
2015 if (prev->instance == next->nest_lock)
2016 nest = prev;
2017
Dave Jonesf82b2172008-08-11 09:30:23 +02002018 if (hlock_class(prev) != hlock_class(next))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002019 continue;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02002020
Peter Zijlstra8e182572007-07-19 01:48:54 -07002021 /*
2022 * Allow read-after-read recursion of the same
2023 * lock class (i.e. read_lock(lock)+read_lock(lock)):
2024 */
2025 if ((read == 2) && prev->read)
2026 return 2;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02002027
2028 /*
2029 * We're holding the nest_lock, which serializes this lock's
2030 * nesting behaviour.
2031 */
2032 if (nest)
2033 return 2;
2034
Peter Zijlstra8e182572007-07-19 01:48:54 -07002035 return print_deadlock_bug(curr, prev, next);
2036 }
2037 return 1;
2038}
2039
2040/*
2041 * There was a chain-cache miss, and we are about to add a new dependency
2042 * to a previous lock. We recursively validate the following rules:
2043 *
2044 * - would the adding of the <prev> -> <next> dependency create a
2045 * circular dependency in the graph? [== circular deadlock]
2046 *
2047 * - does the new prev->next dependency connect any hardirq-safe lock
2048 * (in the full backwards-subgraph starting at <prev>) with any
2049 * hardirq-unsafe lock (in the full forwards-subgraph starting at
2050 * <next>)? [== illegal lock inversion with hardirq contexts]
2051 *
2052 * - does the new prev->next dependency connect any softirq-safe lock
2053 * (in the full backwards-subgraph starting at <prev>) with any
2054 * softirq-unsafe lock (in the full forwards-subgraph starting at
2055 * <next>)? [== illegal lock inversion with softirq contexts]
2056 *
2057 * any of these scenarios could lead to a deadlock.
2058 *
2059 * Then if all the validations pass, we add the forwards and backwards
2060 * dependency.
2061 */
2062static int
2063check_prev_add(struct task_struct *curr, struct held_lock *prev,
Byungchul Parkce07a9412017-08-07 16:12:51 +09002064 struct held_lock *next, int distance, struct stack_trace *trace,
2065 int (*save)(struct stack_trace *trace))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002066{
Ming Leidb0002a2009-07-16 15:44:29 +02002067 struct lock_list *uninitialized_var(target_entry);
Peter Zijlstra8b405d52017-10-04 11:13:37 +02002068 struct lock_list *entry;
2069 struct lock_list this;
2070 int ret;
Peter Zijlstra8e182572007-07-19 01:48:54 -07002071
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08002072 if (!hlock_class(prev)->key || !hlock_class(next)->key) {
2073 /*
2074 * The warning statements below may trigger a use-after-free
2075 * of the class name. It is better to trigger a use-after free
2076 * and to have the class name most of the time instead of not
2077 * having the class name available.
2078 */
2079 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
2080 "Detected use-after-free of lock class %px/%s\n",
2081 hlock_class(prev),
2082 hlock_class(prev)->name);
2083 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
2084 "Detected use-after-free of lock class %px/%s\n",
2085 hlock_class(next),
2086 hlock_class(next)->name);
2087 return 2;
2088 }
2089
Peter Zijlstra8e182572007-07-19 01:48:54 -07002090 /*
2091 * Prove that the new <prev> -> <next> dependency would not
2092 * create a circular dependency in the graph. (We do this by
2093 * forward-recursing into the graph starting at <next>, and
2094 * checking whether we can reach <prev>.)
2095 *
2096 * We are using global variables to control the recursion, to
2097 * keep the stackframe size of the recursive functions low:
2098 */
Ming Leidb0002a2009-07-16 15:44:29 +02002099 this.class = hlock_class(next);
2100 this.parent = NULL;
2101 ret = check_noncircular(&this, hlock_class(prev), &target_entry);
Peter Zijlstra8b405d52017-10-04 11:13:37 +02002102 if (unlikely(!ret)) {
2103 if (!trace->entries) {
2104 /*
2105 * If @save fails here, the printing might trigger
2106 * a WARN but because of the !nr_entries it should
2107 * not do bad things.
2108 */
2109 save(trace);
2110 }
Byungchul Park383a4bc2017-08-07 16:12:55 +09002111 return print_circular_bug(&this, target_entry, next, prev, trace);
Peter Zijlstra8b405d52017-10-04 11:13:37 +02002112 }
Ming Leidb0002a2009-07-16 15:44:29 +02002113 else if (unlikely(ret < 0))
2114 return print_bfs_bug(ret);
Ming Leic94aa5c2009-07-16 15:44:29 +02002115
Peter Zijlstra8e182572007-07-19 01:48:54 -07002116 if (!check_prev_add_irq(curr, prev, next))
2117 return 0;
2118
2119 /*
2120 * For recursive read-locks we do all the dependency checks,
2121 * but we dont store read-triggered dependencies (only
2122 * write-triggered dependencies). This ensures that only the
2123 * write-side dependencies matter, and that if for example a
2124 * write-lock never takes any other locks, then the reads are
2125 * equivalent to a NOP.
2126 */
2127 if (next->read == 2 || prev->read == 2)
2128 return 1;
2129 /*
2130 * Is the <prev> -> <next> dependency already present?
2131 *
2132 * (this may occur even though this is a new chain: consider
2133 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
2134 * chains - the second one will be new, but L1 already has
2135 * L2 added to its dependency list, due to the first chain.)
2136 */
Dave Jonesf82b2172008-08-11 09:30:23 +02002137 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
2138 if (entry->class == hlock_class(next)) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07002139 if (distance == 1)
2140 entry->distance = 1;
Byungchul Park70911fd2017-08-07 16:12:50 +09002141 return 1;
Peter Zijlstra8e182572007-07-19 01:48:54 -07002142 }
2143 }
2144
Peter Zijlstraae813302017-03-03 10:13:38 +01002145 /*
2146 * Is the <prev> -> <next> link redundant?
2147 */
2148 this.class = hlock_class(prev);
2149 this.parent = NULL;
2150 ret = check_redundant(&this, hlock_class(next), &target_entry);
2151 if (!ret) {
2152 debug_atomic_inc(nr_redundant);
2153 return 2;
2154 }
2155 if (ret < 0)
2156 return print_bfs_bug(ret);
2157
2158
Peter Zijlstra8b405d52017-10-04 11:13:37 +02002159 if (!trace->entries && !save(trace))
Byungchul Parkce07a9412017-08-07 16:12:51 +09002160 return 0;
Yong Zhang4726f2a2010-05-04 14:16:48 +08002161
Peter Zijlstra8e182572007-07-19 01:48:54 -07002162 /*
2163 * Ok, all validations passed, add the new lock
2164 * to the previous lock's dependency list:
2165 */
Bart Van Assche86cffb82019-02-14 15:00:41 -08002166 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
Dave Jonesf82b2172008-08-11 09:30:23 +02002167 &hlock_class(prev)->locks_after,
Byungchul Parkce07a9412017-08-07 16:12:51 +09002168 next->acquire_ip, distance, trace);
Peter Zijlstra8e182572007-07-19 01:48:54 -07002169
2170 if (!ret)
2171 return 0;
2172
Bart Van Assche86cffb82019-02-14 15:00:41 -08002173 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
Dave Jonesf82b2172008-08-11 09:30:23 +02002174 &hlock_class(next)->locks_before,
Byungchul Parkce07a9412017-08-07 16:12:51 +09002175 next->acquire_ip, distance, trace);
Peter Zijlstra8e182572007-07-19 01:48:54 -07002176 if (!ret)
2177 return 0;
2178
Byungchul Park70911fd2017-08-07 16:12:50 +09002179 return 2;
Peter Zijlstra8e182572007-07-19 01:48:54 -07002180}
2181
2182/*
2183 * Add the dependency to all directly-previous locks that are 'relevant'.
2184 * The ones that are relevant are (in increasing distance from curr):
2185 * all consecutive trylock entries and the final non-trylock entry - or
2186 * the end of this context's lock-chain - whichever comes first.
2187 */
2188static int
2189check_prevs_add(struct task_struct *curr, struct held_lock *next)
2190{
2191 int depth = curr->lockdep_depth;
2192 struct held_lock *hlock;
Peter Zijlstra8b405d52017-10-04 11:13:37 +02002193 struct stack_trace trace = {
2194 .nr_entries = 0,
2195 .max_entries = 0,
2196 .entries = NULL,
2197 .skip = 0,
2198 };
Peter Zijlstra8e182572007-07-19 01:48:54 -07002199
2200 /*
2201 * Debugging checks.
2202 *
2203 * Depth must not be zero for a non-head lock:
2204 */
2205 if (!depth)
2206 goto out_bug;
2207 /*
2208 * At least two relevant locks must exist for this
2209 * to be a head:
2210 */
2211 if (curr->held_locks[depth].irq_context !=
2212 curr->held_locks[depth-1].irq_context)
2213 goto out_bug;
2214
2215 for (;;) {
2216 int distance = curr->lockdep_depth - depth + 1;
Oleg Nesterov1b5ff812014-01-20 19:20:10 +01002217 hlock = curr->held_locks + depth - 1;
Byungchul Parkce07a9412017-08-07 16:12:51 +09002218
Ingo Molnare966eae2017-12-12 12:31:16 +01002219 /*
2220 * Only non-recursive-read entries get new dependencies
2221 * added:
2222 */
2223 if (hlock->read != 2 && hlock->check) {
2224 int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
2225 if (!ret)
2226 return 0;
2227
2228 /*
2229 * Stop after the first non-trylock entry,
2230 * as non-trylock entries have added their
2231 * own direct dependencies already, so this
2232 * lock is connected to them indirectly:
2233 */
2234 if (!hlock->trylock)
2235 break;
Peter Zijlstra8e182572007-07-19 01:48:54 -07002236 }
Ingo Molnare966eae2017-12-12 12:31:16 +01002237
Peter Zijlstra8e182572007-07-19 01:48:54 -07002238 depth--;
2239 /*
2240 * End of lock-stack?
2241 */
2242 if (!depth)
2243 break;
2244 /*
2245 * Stop the search if we cross into another context:
2246 */
2247 if (curr->held_locks[depth].irq_context !=
2248 curr->held_locks[depth-1].irq_context)
2249 break;
2250 }
2251 return 1;
2252out_bug:
2253 if (!debug_locks_off_graph_unlock())
2254 return 0;
2255
Peter Zijlstra0119fee2011-09-02 01:30:29 +02002256 /*
2257 * Clearly we all shouldn't be here, but since we made it we
2258 * can reliable say we messed up our state. See the above two
2259 * gotos for reasons why we could possibly end up here.
2260 */
Peter Zijlstra8e182572007-07-19 01:48:54 -07002261 WARN_ON(1);
2262
2263 return 0;
2264}
2265
Huang, Ying443cd502008-06-20 16:39:21 +08002266struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
Bart Van Asschede4643a2019-02-14 15:00:50 -08002267static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
Huang, Yingcd1a28e2008-06-23 11:20:54 +08002268int nr_chain_hlocks;
Huang, Ying443cd502008-06-20 16:39:21 +08002269static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2270
2271struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2272{
2273 return lock_classes + chain_hlocks[chain->base + i];
2274}
Peter Zijlstra8e182572007-07-19 01:48:54 -07002275
2276/*
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002277 * Returns the index of the first held_lock of the current chain
2278 */
2279static inline int get_first_held_lock(struct task_struct *curr,
2280 struct held_lock *hlock)
2281{
2282 int i;
2283 struct held_lock *hlock_curr;
2284
2285 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2286 hlock_curr = curr->held_locks + i;
2287 if (hlock_curr->irq_context != hlock->irq_context)
2288 break;
2289
2290 }
2291
2292 return ++i;
2293}
2294
Borislav Petkov5c8a0102016-04-04 10:42:07 +02002295#ifdef CONFIG_DEBUG_LOCKDEP
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002296/*
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002297 * Returns the next chain_key iteration
2298 */
2299static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2300{
2301 u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2302
2303 printk(" class_idx:%d -> chain_key:%016Lx",
2304 class_idx,
2305 (unsigned long long)new_chain_key);
2306 return new_chain_key;
2307}
2308
2309static void
2310print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2311{
2312 struct held_lock *hlock;
2313 u64 chain_key = 0;
2314 int depth = curr->lockdep_depth;
2315 int i;
2316
2317 printk("depth: %u\n", depth + 1);
2318 for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2319 hlock = curr->held_locks + i;
2320 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2321
2322 print_lock(hlock);
2323 }
2324
2325 print_chain_key_iteration(hlock_next->class_idx, chain_key);
2326 print_lock(hlock_next);
2327}
2328
2329static void print_chain_keys_chain(struct lock_chain *chain)
2330{
2331 int i;
2332 u64 chain_key = 0;
2333 int class_id;
2334
2335 printk("depth: %u\n", chain->depth);
2336 for (i = 0; i < chain->depth; i++) {
2337 class_id = chain_hlocks[chain->base + i];
2338 chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2339
2340 print_lock_name(lock_classes + class_id);
2341 printk("\n");
2342 }
2343}
2344
2345static void print_collision(struct task_struct *curr,
2346 struct held_lock *hlock_next,
2347 struct lock_chain *chain)
2348{
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002349 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08002350 pr_warn("============================\n");
2351 pr_warn("WARNING: chain_key collision\n");
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002352 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08002353 pr_warn("----------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002354 pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2355 pr_warn("Hash chain already cached but the contents don't match!\n");
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002356
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002357 pr_warn("Held locks:");
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002358 print_chain_keys_held_locks(curr, hlock_next);
2359
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002360 pr_warn("Locks in cached chain:");
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002361 print_chain_keys_chain(chain);
2362
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002363 pr_warn("\nstack backtrace:\n");
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002364 dump_stack();
2365}
Borislav Petkov5c8a0102016-04-04 10:42:07 +02002366#endif
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002367
2368/*
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002369 * Checks whether the chain and the current held locks are consistent
2370 * in depth and also in content. If they are not it most likely means
2371 * that there was a collision during the calculation of the chain_key.
2372 * Returns: 0 not passed, 1 passed
2373 */
2374static int check_no_collision(struct task_struct *curr,
2375 struct held_lock *hlock,
2376 struct lock_chain *chain)
2377{
2378#ifdef CONFIG_DEBUG_LOCKDEP
2379 int i, j, id;
2380
2381 i = get_first_held_lock(curr, hlock);
2382
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002383 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2384 print_collision(curr, hlock, chain);
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002385 return 0;
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002386 }
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002387
2388 for (j = 0; j < chain->depth - 1; j++, i++) {
2389 id = curr->held_locks[i].class_idx - 1;
2390
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002391 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2392 print_collision(curr, hlock, chain);
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002393 return 0;
Alfredo Alvarez Fernandez39e2e172016-03-30 19:03:36 +02002394 }
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002395 }
2396#endif
2397 return 1;
2398}
2399
2400/*
Bart Van Assche22126842019-02-14 15:00:48 -08002401 * Given an index that is >= -1, return the index of the next lock chain.
2402 * Return -2 if there is no next lock chain.
2403 */
2404long lockdep_next_lockchain(long i)
2405{
Bart Van Asschede4643a2019-02-14 15:00:50 -08002406 i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
2407 return i < ARRAY_SIZE(lock_chains) ? i : -2;
Bart Van Assche22126842019-02-14 15:00:48 -08002408}
2409
2410unsigned long lock_chain_count(void)
2411{
Bart Van Asschede4643a2019-02-14 15:00:50 -08002412 return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
2413}
2414
2415/* Must be called with the graph lock held. */
2416static struct lock_chain *alloc_lock_chain(void)
2417{
2418 int idx = find_first_zero_bit(lock_chains_in_use,
2419 ARRAY_SIZE(lock_chains));
2420
2421 if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
2422 return NULL;
2423 __set_bit(idx, lock_chains_in_use);
2424 return lock_chains + idx;
Bart Van Assche22126842019-02-14 15:00:48 -08002425}
2426
2427/*
Byungchul Park545c23f2017-08-07 16:12:48 +09002428 * Adds a dependency chain into chain hashtable. And must be called with
2429 * graph_lock held.
2430 *
2431 * Return 0 if fail, and graph_lock is released.
2432 * Return 1 if succeed, with graph_lock held.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002433 */
Byungchul Park545c23f2017-08-07 16:12:48 +09002434static inline int add_chain_cache(struct task_struct *curr,
2435 struct held_lock *hlock,
2436 u64 chain_key)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002437{
Dave Jonesf82b2172008-08-11 09:30:23 +02002438 struct lock_class *class = hlock_class(hlock);
Andrew Mortona63f38c2016-02-03 13:44:12 -08002439 struct hlist_head *hash_head = chainhashentry(chain_key);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002440 struct lock_chain *chain;
Steven Rostedte0944ee2011-04-20 21:42:00 -04002441 int i, j;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002442
Peter Zijlstra0119fee2011-09-02 01:30:29 +02002443 /*
Bart Van Assche527af3e2019-02-14 15:00:49 -08002444 * The caller must hold the graph lock, ensure we've got IRQs
Peter Zijlstra0119fee2011-09-02 01:30:29 +02002445 * disabled to make this an IRQ-safe lock.. for recursion reasons
2446 * lockdep won't complain about its own locking errors.
2447 */
Jarek Poplawski381a2292007-02-10 01:44:58 -08002448 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2449 return 0;
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002450
Bart Van Asschede4643a2019-02-14 15:00:50 -08002451 chain = alloc_lock_chain();
2452 if (!chain) {
Ingo Molnar74c383f2006-12-13 00:34:43 -08002453 if (!debug_locks_off_graph_unlock())
2454 return 0;
2455
Dave Jones2c522832013-04-25 13:40:02 -04002456 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
Peter Zijlstraeedeeab2009-03-18 12:38:47 +01002457 dump_stack();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002458 return 0;
2459 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002460 chain->chain_key = chain_key;
Huang, Ying443cd502008-06-20 16:39:21 +08002461 chain->irq_context = hlock->irq_context;
Ingo Molnar9e4e7552016-02-29 10:03:58 +01002462 i = get_first_held_lock(curr, hlock);
Huang, Ying443cd502008-06-20 16:39:21 +08002463 chain->depth = curr->lockdep_depth + 1 - i;
Peter Zijlstra75dd6022016-03-30 11:36:59 +02002464
2465 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2466 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
2467 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2468
Steven Rostedte0944ee2011-04-20 21:42:00 -04002469 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2470 chain->base = nr_chain_hlocks;
Huang, Ying443cd502008-06-20 16:39:21 +08002471 for (j = 0; j < chain->depth - 1; j++, i++) {
Dave Jonesf82b2172008-08-11 09:30:23 +02002472 int lock_id = curr->held_locks[i].class_idx - 1;
Huang, Ying443cd502008-06-20 16:39:21 +08002473 chain_hlocks[chain->base + j] = lock_id;
2474 }
2475 chain_hlocks[chain->base + j] = class - lock_classes;
Peter Zijlstra75dd6022016-03-30 11:36:59 +02002476 nr_chain_hlocks += chain->depth;
Bart Van Assche523b1132019-02-14 15:00:39 -08002477 } else {
Byungchul Parkf9af4562017-01-13 11:42:04 +09002478 if (!debug_locks_off_graph_unlock())
Peter Zijlstra75dd6022016-03-30 11:36:59 +02002479 return 0;
2480
2481 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2482 dump_stack();
2483 return 0;
2484 }
Peter Zijlstra75dd6022016-03-30 11:36:59 +02002485
Andrew Mortona63f38c2016-02-03 13:44:12 -08002486 hlist_add_head_rcu(&chain->entry, hash_head);
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02002487 debug_atomic_inc(chain_lookup_misses);
Peter Zijlstra8e182572007-07-19 01:48:54 -07002488 inc_chains();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002489
2490 return 1;
2491}
Peter Zijlstra8e182572007-07-19 01:48:54 -07002492
Byungchul Park545c23f2017-08-07 16:12:48 +09002493/*
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08002494 * Look up a dependency chain. Must be called with either the graph lock or
2495 * the RCU read lock held.
Byungchul Park545c23f2017-08-07 16:12:48 +09002496 */
2497static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2498{
2499 struct hlist_head *hash_head = chainhashentry(chain_key);
2500 struct lock_chain *chain;
2501
Byungchul Park545c23f2017-08-07 16:12:48 +09002502 hlist_for_each_entry_rcu(chain, hash_head, entry) {
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08002503 if (READ_ONCE(chain->chain_key) == chain_key) {
Byungchul Park545c23f2017-08-07 16:12:48 +09002504 debug_atomic_inc(chain_lookup_hits);
2505 return chain;
2506 }
2507 }
2508 return NULL;
2509}
2510
2511/*
2512 * If the key is not present yet in dependency chain cache then
2513 * add it and return 1 - in this case the new dependency chain is
2514 * validated. If the key is already hashed, return 0.
2515 * (On return with 1 graph_lock is held.)
2516 */
2517static inline int lookup_chain_cache_add(struct task_struct *curr,
2518 struct held_lock *hlock,
2519 u64 chain_key)
2520{
2521 struct lock_class *class = hlock_class(hlock);
2522 struct lock_chain *chain = lookup_chain_cache(chain_key);
2523
2524 if (chain) {
2525cache_hit:
2526 if (!check_no_collision(curr, hlock, chain))
2527 return 0;
2528
2529 if (very_verbose(class)) {
2530 printk("\nhash chain already cached, key: "
Borislav Petkov04860d42018-02-26 14:49:26 +01002531 "%016Lx tail class: [%px] %s\n",
Byungchul Park545c23f2017-08-07 16:12:48 +09002532 (unsigned long long)chain_key,
2533 class->key, class->name);
2534 }
2535
2536 return 0;
2537 }
2538
2539 if (very_verbose(class)) {
Borislav Petkov04860d42018-02-26 14:49:26 +01002540 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
Byungchul Park545c23f2017-08-07 16:12:48 +09002541 (unsigned long long)chain_key, class->key, class->name);
2542 }
2543
2544 if (!graph_lock())
2545 return 0;
2546
2547 /*
2548 * We have to walk the chain again locked - to avoid duplicates:
2549 */
2550 chain = lookup_chain_cache(chain_key);
2551 if (chain) {
2552 graph_unlock();
2553 goto cache_hit;
2554 }
2555
2556 if (!add_chain_cache(curr, hlock, chain_key))
2557 return 0;
2558
2559 return 1;
2560}
2561
Peter Zijlstra8e182572007-07-19 01:48:54 -07002562static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
Johannes Berg4e6045f2007-10-18 23:39:55 -07002563 struct held_lock *hlock, int chain_head, u64 chain_key)
Peter Zijlstra8e182572007-07-19 01:48:54 -07002564{
2565 /*
2566 * Trylock needs to maintain the stack of held locks, but it
2567 * does not add new dependencies, because trylock can be done
2568 * in any order.
2569 *
2570 * We look up the chain_key and do the O(N^2) check and update of
2571 * the dependencies only if this is a new dependency chain.
Byungchul Park545c23f2017-08-07 16:12:48 +09002572 * (If lookup_chain_cache_add() return with 1 it acquires
Peter Zijlstra8e182572007-07-19 01:48:54 -07002573 * graph_lock for us)
2574 */
Oleg Nesterovfb9edbe2014-01-20 19:20:06 +01002575 if (!hlock->trylock && hlock->check &&
Byungchul Park545c23f2017-08-07 16:12:48 +09002576 lookup_chain_cache_add(curr, hlock, chain_key)) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07002577 /*
2578 * Check whether last held lock:
2579 *
2580 * - is irq-safe, if this lock is irq-unsafe
2581 * - is softirq-safe, if this lock is hardirq-unsafe
2582 *
2583 * And check whether the new lock's dependency graph
2584 * could lead back to the previous lock.
2585 *
2586 * any of these scenarios could lead to a deadlock. If
2587 * All validations
2588 */
2589 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2590
2591 if (!ret)
2592 return 0;
2593 /*
2594 * Mark recursive read, as we jump over it when
2595 * building dependencies (just like we jump over
2596 * trylock entries):
2597 */
2598 if (ret == 2)
2599 hlock->read = 2;
2600 /*
2601 * Add dependency only if this lock is not the head
2602 * of the chain, and if it's not a secondary read-lock:
2603 */
Byungchul Park545c23f2017-08-07 16:12:48 +09002604 if (!chain_head && ret != 2) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07002605 if (!check_prevs_add(curr, hlock))
2606 return 0;
Byungchul Park545c23f2017-08-07 16:12:48 +09002607 }
2608
Peter Zijlstra8e182572007-07-19 01:48:54 -07002609 graph_unlock();
Byungchul Park545c23f2017-08-07 16:12:48 +09002610 } else {
2611 /* after lookup_chain_cache_add(): */
Peter Zijlstra8e182572007-07-19 01:48:54 -07002612 if (unlikely(!debug_locks))
2613 return 0;
Byungchul Park545c23f2017-08-07 16:12:48 +09002614 }
Peter Zijlstra8e182572007-07-19 01:48:54 -07002615
2616 return 1;
2617}
2618#else
2619static inline int validate_chain(struct task_struct *curr,
2620 struct lockdep_map *lock, struct held_lock *hlock,
Gregory Haskins3aa416b2007-10-11 22:11:11 +02002621 int chain_head, u64 chain_key)
Peter Zijlstra8e182572007-07-19 01:48:54 -07002622{
2623 return 1;
2624}
Peter Zijlstraca58abc2007-07-19 01:48:53 -07002625#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002626
2627/*
2628 * We are building curr_chain_key incrementally, so double-check
2629 * it from scratch, to make sure that it's done correctly:
2630 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02002631static void check_chain_key(struct task_struct *curr)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002632{
2633#ifdef CONFIG_DEBUG_LOCKDEP
2634 struct held_lock *hlock, *prev_hlock = NULL;
Alfredo Alvarez Fernandez5f18ab52016-02-11 00:33:32 +01002635 unsigned int i;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002636 u64 chain_key = 0;
2637
2638 for (i = 0; i < curr->lockdep_depth; i++) {
2639 hlock = curr->held_locks + i;
2640 if (chain_key != hlock->prev_chain_key) {
2641 debug_locks_off();
Peter Zijlstra0119fee2011-09-02 01:30:29 +02002642 /*
2643 * We got mighty confused, our chain keys don't match
2644 * with what we expect, someone trample on our task state?
2645 */
Arjan van de Ven2df8b1d2008-07-30 12:43:11 -07002646 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002647 curr->lockdep_depth, i,
2648 (unsigned long long)chain_key,
2649 (unsigned long long)hlock->prev_chain_key);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002650 return;
2651 }
Peter Zijlstra0119fee2011-09-02 01:30:29 +02002652 /*
2653 * Whoops ran out of static storage again?
2654 */
Alfredo Alvarez Fernandez5f18ab52016-02-11 00:33:32 +01002655 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
Jarek Poplawski381a2292007-02-10 01:44:58 -08002656 return;
2657
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002658 if (prev_hlock && (prev_hlock->irq_context !=
2659 hlock->irq_context))
2660 chain_key = 0;
Alfredo Alvarez Fernandez5f18ab52016-02-11 00:33:32 +01002661 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002662 prev_hlock = hlock;
2663 }
2664 if (chain_key != curr->curr_chain_key) {
2665 debug_locks_off();
Peter Zijlstra0119fee2011-09-02 01:30:29 +02002666 /*
2667 * More smoking hash instead of calculating it, damn see these
2668 * numbers float.. I bet that a pink elephant stepped on my memory.
2669 */
Arjan van de Ven2df8b1d2008-07-30 12:43:11 -07002670 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002671 curr->lockdep_depth, i,
2672 (unsigned long long)chain_key,
2673 (unsigned long long)curr->curr_chain_key);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002674 }
2675#endif
2676}
2677
Steven Rostedt282b5c22011-04-20 21:41:59 -04002678static void
2679print_usage_bug_scenario(struct held_lock *lock)
2680{
2681 struct lock_class *class = hlock_class(lock);
2682
2683 printk(" Possible unsafe locking scenario:\n\n");
2684 printk(" CPU0\n");
2685 printk(" ----\n");
2686 printk(" lock(");
2687 __print_lock_name(class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01002688 printk(KERN_CONT ");\n");
Steven Rostedt282b5c22011-04-20 21:41:59 -04002689 printk(" <Interrupt>\n");
2690 printk(" lock(");
2691 __print_lock_name(class);
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01002692 printk(KERN_CONT ");\n");
Steven Rostedt282b5c22011-04-20 21:41:59 -04002693 printk("\n *** DEADLOCK ***\n\n");
2694}
2695
Peter Zijlstra8e182572007-07-19 01:48:54 -07002696static int
2697print_usage_bug(struct task_struct *curr, struct held_lock *this,
2698 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2699{
2700 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2701 return 0;
2702
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002703 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08002704 pr_warn("================================\n");
2705 pr_warn("WARNING: inconsistent lock state\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01002706 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08002707 pr_warn("--------------------------------\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07002708
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002709 pr_warn("inconsistent {%s} -> {%s} usage.\n",
Peter Zijlstra8e182572007-07-19 01:48:54 -07002710 usage_str[prev_bit], usage_str[new_bit]);
2711
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002712 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07002713 curr->comm, task_pid_nr(curr),
Peter Zijlstra8e182572007-07-19 01:48:54 -07002714 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2715 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2716 trace_hardirqs_enabled(curr),
2717 trace_softirqs_enabled(curr));
2718 print_lock(this);
2719
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002720 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
Dave Jonesf82b2172008-08-11 09:30:23 +02002721 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
Peter Zijlstra8e182572007-07-19 01:48:54 -07002722
2723 print_irqtrace_events(curr);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002724 pr_warn("\nother info that might help us debug this:\n");
Steven Rostedt282b5c22011-04-20 21:41:59 -04002725 print_usage_bug_scenario(this);
2726
Peter Zijlstra8e182572007-07-19 01:48:54 -07002727 lockdep_print_held_locks(curr);
2728
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002729 pr_warn("\nstack backtrace:\n");
Peter Zijlstra8e182572007-07-19 01:48:54 -07002730 dump_stack();
2731
2732 return 0;
2733}
2734
2735/*
2736 * Print out an error if an invalid bit is set:
2737 */
2738static inline int
2739valid_state(struct task_struct *curr, struct held_lock *this,
2740 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2741{
Dave Jonesf82b2172008-08-11 09:30:23 +02002742 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
Peter Zijlstra8e182572007-07-19 01:48:54 -07002743 return print_usage_bug(curr, this, bad_bit, new_bit);
2744 return 1;
2745}
2746
2747static int mark_lock(struct task_struct *curr, struct held_lock *this,
2748 enum lock_usage_bit new_bit);
2749
Steven Rostedt81d68a92008-05-12 21:20:42 +02002750#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002751
2752/*
2753 * print irq inversion bug:
2754 */
2755static int
Ming Lei24208ca2009-07-16 15:44:29 +02002756print_irq_inversion_bug(struct task_struct *curr,
2757 struct lock_list *root, struct lock_list *other,
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002758 struct held_lock *this, int forwards,
2759 const char *irqclass)
2760{
Steven Rostedtdad3d742011-04-20 21:41:57 -04002761 struct lock_list *entry = other;
2762 struct lock_list *middle = NULL;
2763 int depth;
2764
Ingo Molnar74c383f2006-12-13 00:34:43 -08002765 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002766 return 0;
2767
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002768 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08002769 pr_warn("========================================================\n");
2770 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01002771 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08002772 pr_warn("--------------------------------------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002773 pr_warn("%s/%d just changed the state of lock:\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07002774 curr->comm, task_pid_nr(curr));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002775 print_lock(this);
2776 if (forwards)
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002777 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002778 else
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002779 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
Ming Lei24208ca2009-07-16 15:44:29 +02002780 print_lock_name(other->class);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002781 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002782
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002783 pr_warn("\nother info that might help us debug this:\n");
Steven Rostedtdad3d742011-04-20 21:41:57 -04002784
2785 /* Find a middle lock (if one exists) */
2786 depth = get_lock_depth(other);
2787 do {
2788 if (depth == 0 && (entry != root)) {
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002789 pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
Steven Rostedtdad3d742011-04-20 21:41:57 -04002790 break;
2791 }
2792 middle = entry;
2793 entry = get_lock_parent(entry);
2794 depth--;
2795 } while (entry && entry != root && (depth >= 0));
2796 if (forwards)
2797 print_irq_lock_scenario(root, other,
2798 middle ? middle->class : root->class, other->class);
2799 else
2800 print_irq_lock_scenario(other, root,
2801 middle ? middle->class : other->class, root->class);
2802
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002803 lockdep_print_held_locks(curr);
2804
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002805 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
Ming Lei24208ca2009-07-16 15:44:29 +02002806 if (!save_trace(&root->trace))
2807 return 0;
2808 print_shortest_lock_dependencies(other, root);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002809
Paul E. McKenney681fbec2017-05-04 15:44:38 -07002810 pr_warn("\nstack backtrace:\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002811 dump_stack();
2812
2813 return 0;
2814}
2815
2816/*
2817 * Prove that in the forwards-direction subgraph starting at <this>
2818 * there is no lock matching <mask>:
2819 */
2820static int
2821check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2822 enum lock_usage_bit bit, const char *irqclass)
2823{
2824 int ret;
Ming Leid7aaba12009-07-16 15:44:29 +02002825 struct lock_list root;
2826 struct lock_list *uninitialized_var(target_entry);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002827
Ming Leid7aaba12009-07-16 15:44:29 +02002828 root.parent = NULL;
2829 root.class = hlock_class(this);
2830 ret = find_usage_forwards(&root, bit, &target_entry);
Peter Zijlstraaf012962009-07-16 15:44:29 +02002831 if (ret < 0)
2832 return print_bfs_bug(ret);
2833 if (ret == 1)
2834 return ret;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002835
Ming Lei24208ca2009-07-16 15:44:29 +02002836 return print_irq_inversion_bug(curr, &root, target_entry,
Ming Leid7aaba12009-07-16 15:44:29 +02002837 this, 1, irqclass);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002838}
2839
2840/*
2841 * Prove that in the backwards-direction subgraph starting at <this>
2842 * there is no lock matching <mask>:
2843 */
2844static int
2845check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2846 enum lock_usage_bit bit, const char *irqclass)
2847{
2848 int ret;
Ming Leid7aaba12009-07-16 15:44:29 +02002849 struct lock_list root;
2850 struct lock_list *uninitialized_var(target_entry);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002851
Ming Leid7aaba12009-07-16 15:44:29 +02002852 root.parent = NULL;
2853 root.class = hlock_class(this);
2854 ret = find_usage_backwards(&root, bit, &target_entry);
Peter Zijlstraaf012962009-07-16 15:44:29 +02002855 if (ret < 0)
2856 return print_bfs_bug(ret);
2857 if (ret == 1)
2858 return ret;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002859
Ming Lei24208ca2009-07-16 15:44:29 +02002860 return print_irq_inversion_bug(curr, &root, target_entry,
Oleg Nesterov48d50672010-01-26 19:16:41 +01002861 this, 0, irqclass);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002862}
2863
Ingo Molnar3117df02006-12-13 00:34:43 -08002864void print_irqtrace_events(struct task_struct *curr)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002865{
2866 printk("irq event stamp: %u\n", curr->irq_events);
Borislav Petkov04860d42018-02-26 14:49:26 +01002867 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01002868 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2869 (void *)curr->hardirq_enable_ip);
Borislav Petkov04860d42018-02-26 14:49:26 +01002870 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01002871 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2872 (void *)curr->hardirq_disable_ip);
Borislav Petkov04860d42018-02-26 14:49:26 +01002873 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01002874 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2875 (void *)curr->softirq_enable_ip);
Borislav Petkov04860d42018-02-26 14:49:26 +01002876 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01002877 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2878 (void *)curr->softirq_disable_ip);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002879}
2880
Peter Zijlstracd953022009-01-22 16:38:21 +01002881static int HARDIRQ_verbose(struct lock_class *class)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002882{
Peter Zijlstra8e182572007-07-19 01:48:54 -07002883#if HARDIRQ_VERBOSE
2884 return class_filter(class);
2885#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002886 return 0;
2887}
2888
Peter Zijlstracd953022009-01-22 16:38:21 +01002889static int SOFTIRQ_verbose(struct lock_class *class)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002890{
Peter Zijlstra8e182572007-07-19 01:48:54 -07002891#if SOFTIRQ_VERBOSE
2892 return class_filter(class);
2893#endif
2894 return 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002895}
2896
2897#define STRICT_READ_CHECKS 1
2898
Peter Zijlstracd953022009-01-22 16:38:21 +01002899static int (*state_verbose_f[])(struct lock_class *class) = {
2900#define LOCKDEP_STATE(__STATE) \
2901 __STATE##_verbose,
2902#include "lockdep_states.h"
2903#undef LOCKDEP_STATE
2904};
2905
2906static inline int state_verbose(enum lock_usage_bit bit,
2907 struct lock_class *class)
2908{
2909 return state_verbose_f[bit >> 2](class);
2910}
2911
Peter Zijlstra42c50d52009-01-22 16:58:16 +01002912typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2913 enum lock_usage_bit bit, const char *name);
2914
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002915static int
Peter Zijlstra1c21f142009-03-04 13:51:13 +01002916mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2917 enum lock_usage_bit new_bit)
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002918{
Peter Zijlstraf9892092009-01-22 16:09:59 +01002919 int excl_bit = exclusive_bit(new_bit);
Frederic Weisbeckerbba2a8f2018-12-28 06:02:01 +01002920 int read = new_bit & LOCK_USAGE_READ_MASK;
2921 int dir = new_bit & LOCK_USAGE_DIR_MASK;
Peter Zijlstra42c50d52009-01-22 16:58:16 +01002922
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002923 /*
2924 * mark USED_IN has to look forwards -- to ensure no dependency
2925 * has ENABLED state, which would allow recursion deadlocks.
2926 *
2927 * mark ENABLED has to look backwards -- to ensure no dependee
2928 * has USED_IN state, which, again, would allow recursion deadlocks.
2929 */
Peter Zijlstra42c50d52009-01-22 16:58:16 +01002930 check_usage_f usage = dir ?
2931 check_usage_backwards : check_usage_forwards;
Peter Zijlstraf9892092009-01-22 16:09:59 +01002932
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002933 /*
2934 * Validate that this particular lock does not have conflicting
2935 * usage states.
2936 */
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002937 if (!valid_state(curr, this, new_bit, excl_bit))
2938 return 0;
Peter Zijlstra9d3651a2009-01-22 17:18:32 +01002939
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002940 /*
2941 * Validate that the lock dependencies don't have conflicting usage
2942 * states.
2943 */
2944 if ((!read || !dir || STRICT_READ_CHECKS) &&
Frederic Weisbeckerbba2a8f2018-12-28 06:02:01 +01002945 !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002946 return 0;
Peter Zijlstra780e8202009-01-22 16:51:29 +01002947
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002948 /*
2949 * Check for read in write conflicts
2950 */
2951 if (!read) {
Frederic Weisbeckerbba2a8f2018-12-28 06:02:01 +01002952 if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002953 return 0;
2954
2955 if (STRICT_READ_CHECKS &&
Frederic Weisbeckerbba2a8f2018-12-28 06:02:01 +01002956 !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
2957 state_name(new_bit + LOCK_USAGE_READ_MASK)))
Peter Zijlstra38aa2712009-01-27 14:53:50 +01002958 return 0;
2959 }
Peter Zijlstra780e8202009-01-22 16:51:29 +01002960
Peter Zijlstracd953022009-01-22 16:38:21 +01002961 if (state_verbose(new_bit, hlock_class(this)))
Peter Zijlstra6a6904d2009-01-22 16:07:44 +01002962 return 2;
2963
2964 return 1;
2965}
2966
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002967/*
2968 * Mark all held locks with a usage bit:
2969 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02002970static int
Frederic Weisbecker436a49a2018-12-28 06:02:00 +01002971mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002972{
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002973 struct held_lock *hlock;
2974 int i;
2975
2976 for (i = 0; i < curr->lockdep_depth; i++) {
Frederic Weisbecker436a49a2018-12-28 06:02:00 +01002977 enum lock_usage_bit hlock_bit = base_bit;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002978 hlock = curr->held_locks + i;
2979
Peter Zijlstracf2ad4d2009-01-27 13:58:08 +01002980 if (hlock->read)
Frederic Weisbeckerbba2a8f2018-12-28 06:02:01 +01002981 hlock_bit += LOCK_USAGE_READ_MASK;
Peter Zijlstracf2ad4d2009-01-27 13:58:08 +01002982
Frederic Weisbecker436a49a2018-12-28 06:02:00 +01002983 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
Nick Piggincf40bd12009-01-21 08:12:39 +01002984
Oleg Nesterov34d0ed52014-01-20 19:20:13 +01002985 if (!hlock->check)
Peter Zijlstraefbe2ee2011-07-07 11:39:45 +02002986 continue;
2987
Frederic Weisbecker436a49a2018-12-28 06:02:00 +01002988 if (!mark_lock(curr, hlock, hlock_bit))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002989 return 0;
2990 }
2991
2992 return 1;
2993}
2994
2995/*
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002996 * Hardirqs will be enabled:
2997 */
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02002998static void __trace_hardirqs_on_caller(unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07002999{
3000 struct task_struct *curr = current;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003001
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003002 /* we'll do an OFF -> ON transition: */
3003 curr->hardirqs_enabled = 1;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003004
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003005 /*
3006 * We are going to turn hardirqs on, so set the
3007 * usage bit for all held locks:
3008 */
Frederic Weisbecker436a49a2018-12-28 06:02:00 +01003009 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003010 return;
3011 /*
3012 * If we have softirqs enabled, then set the usage
3013 * bit for all held locks. (disabled hardirqs prevented
3014 * this bit from being set before)
3015 */
3016 if (curr->softirqs_enabled)
Frederic Weisbecker436a49a2018-12-28 06:02:00 +01003017 if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003018 return;
3019
3020 curr->hardirq_enable_ip = ip;
3021 curr->hardirq_enable_event = ++curr->irq_events;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02003022 debug_atomic_inc(hardirqs_on_events);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003023}
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003024
Steven Rostedt (VMware)bff1b202018-08-06 15:50:58 -04003025void lockdep_hardirqs_on(unsigned long ip)
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003026{
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003027 if (unlikely(!debug_locks || current->lockdep_recursion))
3028 return;
3029
Peter Zijlstra7d36b262011-07-26 13:13:44 +02003030 if (unlikely(current->hardirqs_enabled)) {
3031 /*
3032 * Neither irq nor preemption are disabled here
3033 * so this is racy by nature but losing one hit
3034 * in a stat is not a big deal.
3035 */
3036 __debug_atomic_inc(redundant_hardirqs_on);
3037 return;
3038 }
3039
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003040 /*
3041 * We're enabling irqs and according to our state above irqs weren't
3042 * already enabled, yet we find the hardware thinks they are in fact
3043 * enabled.. someone messed up their IRQ state tracing.
3044 */
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003045 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3046 return;
3047
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003048 /*
3049 * See the fine text that goes along with this variable definition.
3050 */
Peter Zijlstra7d36b262011-07-26 13:13:44 +02003051 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
3052 return;
3053
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003054 /*
3055 * Can't allow enabling interrupts while in an interrupt handler,
3056 * that's general bad form and such. Recursion, limited stack etc..
3057 */
Peter Zijlstra7d36b262011-07-26 13:13:44 +02003058 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
3059 return;
3060
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003061 current->lockdep_recursion = 1;
3062 __trace_hardirqs_on_caller(ip);
3063 current->lockdep_recursion = 0;
3064}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003065
3066/*
3067 * Hardirqs were disabled:
3068 */
Steven Rostedt (VMware)bff1b202018-08-06 15:50:58 -04003069void lockdep_hardirqs_off(unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003070{
3071 struct task_struct *curr = current;
3072
3073 if (unlikely(!debug_locks || current->lockdep_recursion))
3074 return;
3075
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003076 /*
3077 * So we're supposed to get called after you mask local IRQs, but for
3078 * some reason the hardware doesn't quite think you did a proper job.
3079 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003080 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3081 return;
3082
3083 if (curr->hardirqs_enabled) {
3084 /*
3085 * We have done an ON -> OFF transition:
3086 */
3087 curr->hardirqs_enabled = 0;
Heiko Carstens6afe40b2008-10-28 11:14:58 +01003088 curr->hardirq_disable_ip = ip;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003089 curr->hardirq_disable_event = ++curr->irq_events;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02003090 debug_atomic_inc(hardirqs_off_events);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003091 } else
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02003092 debug_atomic_inc(redundant_hardirqs_off);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003093}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003094
3095/*
3096 * Softirqs will be enabled:
3097 */
3098void trace_softirqs_on(unsigned long ip)
3099{
3100 struct task_struct *curr = current;
3101
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003102 if (unlikely(!debug_locks || current->lockdep_recursion))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003103 return;
3104
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003105 /*
3106 * We fancy IRQs being disabled here, see softirq.c, avoids
3107 * funny state and nesting things.
3108 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003109 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3110 return;
3111
3112 if (curr->softirqs_enabled) {
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02003113 debug_atomic_inc(redundant_softirqs_on);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003114 return;
3115 }
3116
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003117 current->lockdep_recursion = 1;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003118 /*
3119 * We'll do an OFF -> ON transition:
3120 */
3121 curr->softirqs_enabled = 1;
3122 curr->softirq_enable_ip = ip;
3123 curr->softirq_enable_event = ++curr->irq_events;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02003124 debug_atomic_inc(softirqs_on_events);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003125 /*
3126 * We are going to turn softirqs on, so set the
3127 * usage bit for all held locks, if hardirqs are
3128 * enabled too:
3129 */
3130 if (curr->hardirqs_enabled)
Frederic Weisbecker436a49a2018-12-28 06:02:00 +01003131 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003132 current->lockdep_recursion = 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003133}
3134
3135/*
3136 * Softirqs were disabled:
3137 */
3138void trace_softirqs_off(unsigned long ip)
3139{
3140 struct task_struct *curr = current;
3141
Peter Zijlstradd4e5d32011-06-21 17:17:27 +02003142 if (unlikely(!debug_locks || current->lockdep_recursion))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003143 return;
3144
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003145 /*
3146 * We fancy IRQs being disabled here, see softirq.c
3147 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003148 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3149 return;
3150
3151 if (curr->softirqs_enabled) {
3152 /*
3153 * We have done an ON -> OFF transition:
3154 */
3155 curr->softirqs_enabled = 0;
3156 curr->softirq_disable_ip = ip;
3157 curr->softirq_disable_event = ++curr->irq_events;
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02003158 debug_atomic_inc(softirqs_off_events);
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003159 /*
3160 * Whoops, we wanted softirqs off, so why aren't they?
3161 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003162 DEBUG_LOCKS_WARN_ON(!softirq_count());
3163 } else
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02003164 debug_atomic_inc(redundant_softirqs_off);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003165}
3166
Peter Zijlstra8e182572007-07-19 01:48:54 -07003167static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
3168{
3169 /*
3170 * If non-trylock use in a hardirq or softirq context, then
3171 * mark the lock as used in these contexts:
3172 */
3173 if (!hlock->trylock) {
3174 if (hlock->read) {
3175 if (curr->hardirq_context)
3176 if (!mark_lock(curr, hlock,
3177 LOCK_USED_IN_HARDIRQ_READ))
3178 return 0;
3179 if (curr->softirq_context)
3180 if (!mark_lock(curr, hlock,
3181 LOCK_USED_IN_SOFTIRQ_READ))
3182 return 0;
3183 } else {
3184 if (curr->hardirq_context)
3185 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3186 return 0;
3187 if (curr->softirq_context)
3188 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3189 return 0;
3190 }
3191 }
3192 if (!hlock->hardirqs_off) {
3193 if (hlock->read) {
3194 if (!mark_lock(curr, hlock,
Peter Zijlstra4fc95e82009-01-22 13:10:52 +01003195 LOCK_ENABLED_HARDIRQ_READ))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003196 return 0;
3197 if (curr->softirqs_enabled)
3198 if (!mark_lock(curr, hlock,
Peter Zijlstra4fc95e82009-01-22 13:10:52 +01003199 LOCK_ENABLED_SOFTIRQ_READ))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003200 return 0;
3201 } else {
3202 if (!mark_lock(curr, hlock,
Peter Zijlstra4fc95e82009-01-22 13:10:52 +01003203 LOCK_ENABLED_HARDIRQ))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003204 return 0;
3205 if (curr->softirqs_enabled)
3206 if (!mark_lock(curr, hlock,
Peter Zijlstra4fc95e82009-01-22 13:10:52 +01003207 LOCK_ENABLED_SOFTIRQ))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003208 return 0;
3209 }
3210 }
3211
3212 return 1;
3213}
3214
Boqun Fengc2469752016-02-16 13:57:40 +08003215static inline unsigned int task_irq_context(struct task_struct *task)
3216{
3217 return 2 * !!task->hardirq_context + !!task->softirq_context;
3218}
3219
Peter Zijlstra8e182572007-07-19 01:48:54 -07003220static int separate_irq_context(struct task_struct *curr,
3221 struct held_lock *hlock)
3222{
3223 unsigned int depth = curr->lockdep_depth;
3224
3225 /*
3226 * Keep track of points where we cross into an interrupt context:
3227 */
Peter Zijlstra8e182572007-07-19 01:48:54 -07003228 if (depth) {
3229 struct held_lock *prev_hlock;
3230
3231 prev_hlock = curr->held_locks + depth-1;
3232 /*
3233 * If we cross into another context, reset the
3234 * hash key (this also prevents the checking and the
3235 * adding of the dependency to 'prev'):
3236 */
3237 if (prev_hlock->irq_context != hlock->irq_context)
3238 return 1;
3239 }
3240 return 0;
3241}
3242
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003243#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
Peter Zijlstra8e182572007-07-19 01:48:54 -07003244
3245static inline
3246int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3247 enum lock_usage_bit new_bit)
3248{
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003249 WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
Peter Zijlstra8e182572007-07-19 01:48:54 -07003250 return 1;
3251}
3252
3253static inline int mark_irqflags(struct task_struct *curr,
3254 struct held_lock *hlock)
3255{
3256 return 1;
3257}
3258
Boqun Fengc2469752016-02-16 13:57:40 +08003259static inline unsigned int task_irq_context(struct task_struct *task)
3260{
3261 return 0;
3262}
3263
Peter Zijlstra8e182572007-07-19 01:48:54 -07003264static inline int separate_irq_context(struct task_struct *curr,
3265 struct held_lock *hlock)
3266{
3267 return 0;
3268}
3269
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003270#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003271
3272/*
Peter Zijlstra8e182572007-07-19 01:48:54 -07003273 * Mark a lock with a usage bit, and validate the state transition:
3274 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02003275static int mark_lock(struct task_struct *curr, struct held_lock *this,
Steven Rostedt0764d232008-05-12 21:20:44 +02003276 enum lock_usage_bit new_bit)
Peter Zijlstra8e182572007-07-19 01:48:54 -07003277{
3278 unsigned int new_mask = 1 << new_bit, ret = 1;
3279
3280 /*
3281 * If already set then do not dirty the cacheline,
3282 * nor do any checks:
3283 */
Dave Jonesf82b2172008-08-11 09:30:23 +02003284 if (likely(hlock_class(this)->usage_mask & new_mask))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003285 return 1;
3286
3287 if (!graph_lock())
3288 return 0;
3289 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003290 * Make sure we didn't race:
Peter Zijlstra8e182572007-07-19 01:48:54 -07003291 */
Dave Jonesf82b2172008-08-11 09:30:23 +02003292 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
Peter Zijlstra8e182572007-07-19 01:48:54 -07003293 graph_unlock();
3294 return 1;
3295 }
3296
Dave Jonesf82b2172008-08-11 09:30:23 +02003297 hlock_class(this)->usage_mask |= new_mask;
Peter Zijlstra8e182572007-07-19 01:48:54 -07003298
Dave Jonesf82b2172008-08-11 09:30:23 +02003299 if (!save_trace(hlock_class(this)->usage_traces + new_bit))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003300 return 0;
3301
3302 switch (new_bit) {
Peter Zijlstra53464172009-01-22 14:15:53 +01003303#define LOCKDEP_STATE(__STATE) \
3304 case LOCK_USED_IN_##__STATE: \
3305 case LOCK_USED_IN_##__STATE##_READ: \
3306 case LOCK_ENABLED_##__STATE: \
3307 case LOCK_ENABLED_##__STATE##_READ:
3308#include "lockdep_states.h"
3309#undef LOCKDEP_STATE
Peter Zijlstra8e182572007-07-19 01:48:54 -07003310 ret = mark_lock_irq(curr, this, new_bit);
3311 if (!ret)
3312 return 0;
3313 break;
3314 case LOCK_USED:
Frederic Weisbeckerbd6d29c2010-04-06 00:10:17 +02003315 debug_atomic_dec(nr_unused_locks);
Peter Zijlstra8e182572007-07-19 01:48:54 -07003316 break;
3317 default:
3318 if (!debug_locks_off_graph_unlock())
3319 return 0;
3320 WARN_ON(1);
3321 return 0;
3322 }
3323
3324 graph_unlock();
3325
3326 /*
3327 * We must printk outside of the graph_lock:
3328 */
3329 if (ret == 2) {
3330 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3331 print_lock(this);
3332 print_irqtrace_events(curr);
3333 dump_stack();
3334 }
3335
3336 return ret;
3337}
3338
3339/*
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003340 * Initialize a lock instance's lock-class mapping info:
3341 */
Bart Van Assched35568b2018-12-06 17:11:33 -08003342void lockdep_init_map(struct lockdep_map *lock, const char *name,
Peter Zijlstra4dfbb9d2006-10-11 01:45:14 -04003343 struct lock_class_key *key, int subclass)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003344{
Yong Zhangd3d03d42011-11-09 16:04:51 +08003345 int i;
3346
Yong Zhangd3d03d42011-11-09 16:04:51 +08003347 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3348 lock->class_cache[i] = NULL;
Hitoshi Mitake62016252010-10-05 18:01:51 +09003349
Peter Zijlstrac8a25002009-04-17 09:40:49 +02003350#ifdef CONFIG_LOCK_STAT
3351 lock->cpu = raw_smp_processor_id();
3352#endif
3353
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003354 /*
3355 * Can't be having no nameless bastards around this place!
3356 */
Peter Zijlstrac8a25002009-04-17 09:40:49 +02003357 if (DEBUG_LOCKS_WARN_ON(!name)) {
3358 lock->name = "NULL";
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003359 return;
Peter Zijlstrac8a25002009-04-17 09:40:49 +02003360 }
3361
3362 lock->name = name;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003363
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003364 /*
3365 * No key, no joy, we need to hash something.
3366 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003367 if (DEBUG_LOCKS_WARN_ON(!key))
3368 return;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003369 /*
3370 * Sanity check, the lock-class key must be persistent:
3371 */
3372 if (!static_obj(key)) {
Borislav Petkov04860d42018-02-26 14:49:26 +01003373 printk("BUG: key %px not in .data!\n", key);
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003374 /*
3375 * What it says above ^^^^^, I suggest you read it.
3376 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003377 DEBUG_LOCKS_WARN_ON(1);
3378 return;
3379 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003380 lock->key = key;
Peter Zijlstrac8a25002009-04-17 09:40:49 +02003381
3382 if (unlikely(!debug_locks))
3383 return;
3384
Peter Zijlstra35a93932015-02-26 16:23:11 +01003385 if (subclass) {
3386 unsigned long flags;
3387
3388 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3389 return;
3390
3391 raw_local_irq_save(flags);
3392 current->lockdep_recursion = 1;
Peter Zijlstra4dfbb9d2006-10-11 01:45:14 -04003393 register_lock_class(lock, subclass, 1);
Peter Zijlstra35a93932015-02-26 16:23:11 +01003394 current->lockdep_recursion = 0;
3395 raw_local_irq_restore(flags);
3396 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003397}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003398EXPORT_SYMBOL_GPL(lockdep_init_map);
3399
Peter Zijlstra1704f472010-03-19 01:37:42 +01003400struct lock_class_key __lockdep_no_validate__;
Kent Overstreetea6749c2012-12-27 22:21:58 -08003401EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
Peter Zijlstra1704f472010-03-19 01:37:42 +01003402
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003403static int
3404print_lock_nested_lock_not_held(struct task_struct *curr,
3405 struct held_lock *hlock,
3406 unsigned long ip)
3407{
3408 if (!debug_locks_off())
3409 return 0;
3410 if (debug_locks_silent)
3411 return 0;
3412
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003413 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08003414 pr_warn("==================================\n");
3415 pr_warn("WARNING: Nested lock was not taken\n");
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003416 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08003417 pr_warn("----------------------------------\n");
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003418
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003419 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003420 print_lock(hlock);
3421
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003422 pr_warn("\nbut this task is not holding:\n");
3423 pr_warn("%s\n", hlock->nest_lock->name);
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003424
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003425 pr_warn("\nstack backtrace:\n");
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003426 dump_stack();
3427
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003428 pr_warn("\nother info that might help us debug this:\n");
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003429 lockdep_print_held_locks(curr);
3430
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003431 pr_warn("\nstack backtrace:\n");
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003432 dump_stack();
3433
3434 return 0;
3435}
3436
Matthew Wilcox08f36ff2018-01-17 07:14:13 -08003437static int __lock_is_held(const struct lockdep_map *lock, int read);
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003438
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003439/*
3440 * This gets called for every mutex_lock*()/spin_lock*() operation.
3441 * We maintain the dependency maps and validate the locking attempt:
Waiman Long8ee10862018-10-02 16:19:17 -04003442 *
3443 * The callers must make sure that IRQs are disabled before calling it,
3444 * otherwise we could get an interrupt which would want to take locks,
3445 * which would end up in lockdep again.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003446 */
3447static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3448 int trylock, int read, int check, int hardirqs_off,
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003449 struct lockdep_map *nest_lock, unsigned long ip,
Peter Zijlstra21199f22015-09-16 16:10:40 +02003450 int references, int pin_count)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003451{
3452 struct task_struct *curr = current;
Ingo Molnard6d897c2006-07-10 04:44:04 -07003453 struct lock_class *class = NULL;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003454 struct held_lock *hlock;
Alfredo Alvarez Fernandez5f18ab52016-02-11 00:33:32 +01003455 unsigned int depth;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003456 int chain_head = 0;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003457 int class_idx;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003458 u64 chain_key;
3459
3460 if (unlikely(!debug_locks))
3461 return 0;
3462
Oleg Nesterovfb9edbe2014-01-20 19:20:06 +01003463 if (!prove_locking || lock->key == &__lockdep_no_validate__)
3464 check = 0;
Peter Zijlstra1704f472010-03-19 01:37:42 +01003465
Hitoshi Mitake62016252010-10-05 18:01:51 +09003466 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3467 class = lock->class_cache[subclass];
Ingo Molnard6d897c2006-07-10 04:44:04 -07003468 /*
Hitoshi Mitake62016252010-10-05 18:01:51 +09003469 * Not cached?
Ingo Molnard6d897c2006-07-10 04:44:04 -07003470 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003471 if (unlikely(!class)) {
Peter Zijlstra4dfbb9d2006-10-11 01:45:14 -04003472 class = register_lock_class(lock, subclass, 0);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003473 if (!class)
3474 return 0;
3475 }
Waiman Long8ca2b56c2018-10-03 13:07:18 -04003476
3477 debug_class_ops_inc(class);
3478
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003479 if (very_verbose(class)) {
Borislav Petkov04860d42018-02-26 14:49:26 +01003480 printk("\nacquire class [%px] %s", class->key, class->name);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003481 if (class->name_version > 1)
Dmitry Vyukovf943fe02016-11-28 15:24:43 +01003482 printk(KERN_CONT "#%d", class->name_version);
3483 printk(KERN_CONT "\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003484 dump_stack();
3485 }
3486
3487 /*
3488 * Add the lock to the list of currently held locks.
3489 * (we dont increase the depth just yet, up until the
3490 * dependency checks are done)
3491 */
3492 depth = curr->lockdep_depth;
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003493 /*
3494 * Ran out of static storage for our per-task lock stack again have we?
3495 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003496 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3497 return 0;
3498
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003499 class_idx = class - lock_classes + 1;
3500
Ingo Molnare966eae2017-12-12 12:31:16 +01003501 if (depth) {
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003502 hlock = curr->held_locks + depth - 1;
3503 if (hlock->class_idx == class_idx && nest_lock) {
Peter Zijlstra7fb4a2c2017-03-01 16:23:30 +01003504 if (hlock->references) {
3505 /*
3506 * Check: unsigned int references:12, overflow.
3507 */
3508 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3509 return 0;
3510
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003511 hlock->references++;
Peter Zijlstra7fb4a2c2017-03-01 16:23:30 +01003512 } else {
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003513 hlock->references = 2;
Peter Zijlstra7fb4a2c2017-03-01 16:23:30 +01003514 }
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003515
3516 return 1;
3517 }
3518 }
3519
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003520 hlock = curr->held_locks + depth;
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003521 /*
3522 * Plain impossible, we just registered it and checked it weren't no
3523 * NULL like.. I bet this mushroom I ate was good!
3524 */
Dave Jonesf82b2172008-08-11 09:30:23 +02003525 if (DEBUG_LOCKS_WARN_ON(!class))
3526 return 0;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003527 hlock->class_idx = class_idx;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003528 hlock->acquire_ip = ip;
3529 hlock->instance = lock;
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02003530 hlock->nest_lock = nest_lock;
Boqun Fengc2469752016-02-16 13:57:40 +08003531 hlock->irq_context = task_irq_context(curr);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003532 hlock->trylock = trylock;
3533 hlock->read = read;
3534 hlock->check = check;
Dmitry Baryshkov6951b122008-08-18 04:26:37 +04003535 hlock->hardirqs_off = !!hardirqs_off;
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003536 hlock->references = references;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003537#ifdef CONFIG_LOCK_STAT
3538 hlock->waittime_stamp = 0;
Peter Zijlstra3365e7792009-10-09 10:12:41 +02003539 hlock->holdtime_stamp = lockstat_clock();
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003540#endif
Peter Zijlstra21199f22015-09-16 16:10:40 +02003541 hlock->pin_count = pin_count;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003542
Oleg Nesterovfb9edbe2014-01-20 19:20:06 +01003543 if (check && !mark_irqflags(curr, hlock))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003544 return 0;
3545
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003546 /* mark it as used: */
Jarek Poplawski4ff773bb2007-05-08 00:31:00 -07003547 if (!mark_lock(curr, hlock, LOCK_USED))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003548 return 0;
Peter Zijlstra8e182572007-07-19 01:48:54 -07003549
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003550 /*
Gautham R Shenoy17aacfb92007-10-28 20:47:01 +01003551 * Calculate the chain hash: it's the combined hash of all the
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003552 * lock keys along the dependency chain. We save the hash value
3553 * at every step so that we can get the current hash easily
3554 * after unlock. The chain hash is then used to cache dependency
3555 * results.
3556 *
3557 * The 'key ID' is what is the most compact key value to drive
3558 * the hash, not class->key.
3559 */
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003560 /*
3561 * Whoops, we did it again.. ran straight out of our static allocation.
3562 */
Alfredo Alvarez Fernandez5f18ab52016-02-11 00:33:32 +01003563 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003564 return 0;
3565
3566 chain_key = curr->curr_chain_key;
3567 if (!depth) {
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003568 /*
3569 * How can we have a chain hash when we ain't got no keys?!
3570 */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003571 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3572 return 0;
3573 chain_head = 1;
3574 }
3575
3576 hlock->prev_chain_key = chain_key;
Peter Zijlstra8e182572007-07-19 01:48:54 -07003577 if (separate_irq_context(curr, hlock)) {
3578 chain_key = 0;
3579 chain_head = 1;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003580 }
Alfredo Alvarez Fernandez5f18ab52016-02-11 00:33:32 +01003581 chain_key = iterate_chain_key(chain_key, class_idx);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003582
Peter Zijlstraf8319482016-11-30 14:32:25 +11003583 if (nest_lock && !__lock_is_held(nest_lock, -1))
Maarten Lankhorstd0945952012-09-13 11:39:51 +02003584 return print_lock_nested_lock_not_held(curr, hlock, ip);
3585
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08003586 if (!debug_locks_silent) {
3587 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
3588 WARN_ON_ONCE(!hlock_class(hlock)->key);
3589 }
3590
Gregory Haskins3aa416b2007-10-11 22:11:11 +02003591 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
Peter Zijlstra8e182572007-07-19 01:48:54 -07003592 return 0;
Jarek Poplawski381a2292007-02-10 01:44:58 -08003593
Gregory Haskins3aa416b2007-10-11 22:11:11 +02003594 curr->curr_chain_key = chain_key;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003595 curr->lockdep_depth++;
3596 check_chain_key(curr);
Jarek Poplawski60e114d2007-02-20 13:58:00 -08003597#ifdef CONFIG_DEBUG_LOCKDEP
3598 if (unlikely(!debug_locks))
3599 return 0;
3600#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003601 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3602 debug_locks_off();
Dave Jones2c522832013-04-25 13:40:02 -04003603 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3604 printk(KERN_DEBUG "depth: %i max: %lu!\n",
Ben Greearc0540602013-02-06 10:56:19 -08003605 curr->lockdep_depth, MAX_LOCK_DEPTH);
Ben Greearc0540602013-02-06 10:56:19 -08003606
3607 lockdep_print_held_locks(current);
3608 debug_show_all_locks();
Peter Zijlstraeedeeab2009-03-18 12:38:47 +01003609 dump_stack();
Ben Greearc0540602013-02-06 10:56:19 -08003610
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003611 return 0;
3612 }
Jarek Poplawski381a2292007-02-10 01:44:58 -08003613
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003614 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3615 max_lockdep_depth = curr->lockdep_depth;
3616
3617 return 1;
3618}
3619
3620static int
Srivatsa S. Bhatf86f7552013-01-08 18:35:58 +05303621print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003622 unsigned long ip)
3623{
3624 if (!debug_locks_off())
3625 return 0;
3626 if (debug_locks_silent)
3627 return 0;
3628
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003629 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08003630 pr_warn("=====================================\n");
3631 pr_warn("WARNING: bad unlock balance detected!\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01003632 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08003633 pr_warn("-------------------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003634 pr_warn("%s/%d is trying to release lock (",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07003635 curr->comm, task_pid_nr(curr));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003636 print_lockdep_cache(lock);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003637 pr_cont(") at:\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003638 print_ip_sym(ip);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003639 pr_warn("but there are no more locks to release!\n");
3640 pr_warn("\nother info that might help us debug this:\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003641 lockdep_print_held_locks(curr);
3642
Paul E. McKenney681fbec2017-05-04 15:44:38 -07003643 pr_warn("\nstack backtrace:\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003644 dump_stack();
3645
3646 return 0;
3647}
3648
Matthew Wilcox08f36ff2018-01-17 07:14:13 -08003649static int match_held_lock(const struct held_lock *hlock,
3650 const struct lockdep_map *lock)
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003651{
3652 if (hlock->instance == lock)
3653 return 1;
3654
3655 if (hlock->references) {
Matthew Wilcox08f36ff2018-01-17 07:14:13 -08003656 const struct lock_class *class = lock->class_cache[0];
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003657
3658 if (!class)
3659 class = look_up_lock_class(lock, 0);
3660
Peter Zijlstra80e04012011-08-05 14:26:17 +02003661 /*
3662 * If look_up_lock_class() failed to find a class, we're trying
3663 * to test if we hold a lock that has never yet been acquired.
3664 * Clearly if the lock hasn't been acquired _ever_, we're not
3665 * holding it either, so report failure.
3666 */
Matthew Wilcox64f29d12018-01-17 07:14:12 -08003667 if (!class)
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003668 return 0;
3669
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003670 /*
3671 * References, but not a lock we're actually ref-counting?
3672 * State got messed up, follow the sites that change ->references
3673 * and try to make sense of it.
3674 */
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003675 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3676 return 0;
3677
3678 if (hlock->class_idx == class - lock_classes + 1)
3679 return 1;
3680 }
3681
3682 return 0;
3683}
3684
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09003685/* @depth must not be zero */
3686static struct held_lock *find_held_lock(struct task_struct *curr,
3687 struct lockdep_map *lock,
3688 unsigned int depth, int *idx)
3689{
3690 struct held_lock *ret, *hlock, *prev_hlock;
3691 int i;
3692
3693 i = depth - 1;
3694 hlock = curr->held_locks + i;
3695 ret = hlock;
3696 if (match_held_lock(hlock, lock))
3697 goto out;
3698
3699 ret = NULL;
3700 for (i--, prev_hlock = hlock--;
3701 i >= 0;
3702 i--, prev_hlock = hlock--) {
3703 /*
3704 * We must not cross into another context:
3705 */
3706 if (prev_hlock->irq_context != hlock->irq_context) {
3707 ret = NULL;
3708 break;
3709 }
3710 if (match_held_lock(hlock, lock)) {
3711 ret = hlock;
3712 break;
3713 }
3714 }
3715
3716out:
3717 *idx = i;
3718 return ret;
3719}
3720
J. R. Okajimae9699702017-02-03 01:38:16 +09003721static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3722 int idx)
3723{
3724 struct held_lock *hlock;
3725
Waiman Long8ee10862018-10-02 16:19:17 -04003726 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3727 return 0;
3728
J. R. Okajimae9699702017-02-03 01:38:16 +09003729 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3730 if (!__lock_acquire(hlock->instance,
3731 hlock_class(hlock)->subclass,
3732 hlock->trylock,
3733 hlock->read, hlock->check,
3734 hlock->hardirqs_off,
3735 hlock->nest_lock, hlock->acquire_ip,
3736 hlock->references, hlock->pin_count))
3737 return 1;
3738 }
3739 return 0;
3740}
3741
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003742static int
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01003743__lock_set_class(struct lockdep_map *lock, const char *name,
3744 struct lock_class_key *key, unsigned int subclass,
3745 unsigned long ip)
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003746{
3747 struct task_struct *curr = current;
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09003748 struct held_lock *hlock;
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003749 struct lock_class *class;
3750 unsigned int depth;
3751 int i;
3752
Waiman Long513e1072019-01-09 23:03:25 -05003753 if (unlikely(!debug_locks))
3754 return 0;
3755
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003756 depth = curr->lockdep_depth;
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003757 /*
3758 * This function is about (re)setting the class of a held lock,
3759 * yet we're not actually holding any locks. Naughty user!
3760 */
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003761 if (DEBUG_LOCKS_WARN_ON(!depth))
3762 return 0;
3763
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09003764 hlock = find_held_lock(curr, lock, depth, &i);
3765 if (!hlock)
3766 return print_unlock_imbalance_bug(curr, lock, ip);
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003767
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01003768 lockdep_init_map(lock, name, key, 0);
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003769 class = register_lock_class(lock, subclass, 0);
Dave Jonesf82b2172008-08-11 09:30:23 +02003770 hlock->class_idx = class - lock_classes + 1;
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003771
3772 curr->lockdep_depth = i;
3773 curr->curr_chain_key = hlock->prev_chain_key;
3774
J. R. Okajimae9699702017-02-03 01:38:16 +09003775 if (reacquire_held_locks(curr, depth, i))
3776 return 0;
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003777
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003778 /*
3779 * I took it apart and put it back together again, except now I have
3780 * these 'spare' parts.. where shall I put them.
3781 */
Peter Zijlstra64aa3482008-08-11 09:30:21 +02003782 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3783 return 0;
3784 return 1;
3785}
3786
J. R. Okajima6419c4a2017-02-03 01:38:17 +09003787static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3788{
3789 struct task_struct *curr = current;
3790 struct held_lock *hlock;
3791 unsigned int depth;
3792 int i;
3793
Waiman Long71492582019-01-09 23:03:25 -05003794 if (unlikely(!debug_locks))
3795 return 0;
3796
J. R. Okajima6419c4a2017-02-03 01:38:17 +09003797 depth = curr->lockdep_depth;
3798 /*
3799 * This function is about (re)setting the class of a held lock,
3800 * yet we're not actually holding any locks. Naughty user!
3801 */
3802 if (DEBUG_LOCKS_WARN_ON(!depth))
3803 return 0;
3804
3805 hlock = find_held_lock(curr, lock, depth, &i);
3806 if (!hlock)
3807 return print_unlock_imbalance_bug(curr, lock, ip);
3808
3809 curr->lockdep_depth = i;
3810 curr->curr_chain_key = hlock->prev_chain_key;
3811
3812 WARN(hlock->read, "downgrading a read lock");
3813 hlock->read = 1;
3814 hlock->acquire_ip = ip;
3815
3816 if (reacquire_held_locks(curr, depth, i))
3817 return 0;
3818
3819 /*
3820 * I took it apart and put it back together again, except now I have
3821 * these 'spare' parts.. where shall I put them.
3822 */
3823 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3824 return 0;
3825 return 1;
3826}
3827
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003828/*
Peter Zijlstrae0f56fd2015-06-11 14:46:52 +02003829 * Remove the lock to the list of currently held locks - this gets
3830 * called on mutex_unlock()/spin_unlock*() (or on a failed
3831 * mutex_lock_interruptible()).
3832 *
3833 * @nested is an hysterical artifact, needs a tree wide cleanup.
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003834 */
3835static int
Peter Zijlstrae0f56fd2015-06-11 14:46:52 +02003836__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003837{
Peter Zijlstrae0f56fd2015-06-11 14:46:52 +02003838 struct task_struct *curr = current;
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09003839 struct held_lock *hlock;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003840 unsigned int depth;
Ingo Molnare966eae2017-12-12 12:31:16 +01003841 int i;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003842
Peter Zijlstrae0f56fd2015-06-11 14:46:52 +02003843 if (unlikely(!debug_locks))
3844 return 0;
3845
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003846 depth = curr->lockdep_depth;
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003847 /*
3848 * So we're all set to release this lock.. wait what lock? We don't
3849 * own any locks, you've been drinking again?
3850 */
Peter Zijlstrae0f56fd2015-06-11 14:46:52 +02003851 if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3852 return print_unlock_imbalance_bug(curr, lock, ip);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003853
Peter Zijlstrae0f56fd2015-06-11 14:46:52 +02003854 /*
3855 * Check whether the lock exists in the current stack
3856 * of held locks:
3857 */
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09003858 hlock = find_held_lock(curr, lock, depth, &i);
3859 if (!hlock)
3860 return print_unlock_imbalance_bug(curr, lock, ip);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003861
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003862 if (hlock->instance == lock)
3863 lock_release_holdtime(hlock);
3864
Peter Zijlstraa24fc602015-06-11 14:46:53 +02003865 WARN(hlock->pin_count, "releasing a pinned lock\n");
3866
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003867 if (hlock->references) {
3868 hlock->references--;
3869 if (hlock->references) {
3870 /*
3871 * We had, and after removing one, still have
3872 * references, the current lock stack is still
3873 * valid. We're done!
3874 */
3875 return 1;
3876 }
3877 }
Peter Zijlstraf20786f2007-07-19 01:48:56 -07003878
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003879 /*
3880 * We have the right lock to unlock, 'hlock' points to it.
3881 * Now we remove it from the stack, and add back the other
3882 * entries (if any), recalculating the hash along the way:
3883 */
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003884
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003885 curr->lockdep_depth = i;
3886 curr->curr_chain_key = hlock->prev_chain_key;
3887
Waiman Longce52a182018-10-02 16:19:18 -04003888 /*
3889 * The most likely case is when the unlock is on the innermost
3890 * lock. In this case, we are done!
3891 */
3892 if (i == depth-1)
3893 return 1;
3894
J. R. Okajimae9699702017-02-03 01:38:16 +09003895 if (reacquire_held_locks(curr, depth, i + 1))
3896 return 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003897
Peter Zijlstra0119fee2011-09-02 01:30:29 +02003898 /*
3899 * We had N bottles of beer on the wall, we drank one, but now
3900 * there's not N-1 bottles of beer left on the wall...
3901 */
Waiman Longce52a182018-10-02 16:19:18 -04003902 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
Peter Zijlstrae0f56fd2015-06-11 14:46:52 +02003903
Waiman Longce52a182018-10-02 16:19:18 -04003904 /*
3905 * Since reacquire_held_locks() would have called check_chain_key()
3906 * indirectly via __lock_acquire(), we don't need to do it again
3907 * on return.
3908 */
3909 return 0;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07003910}
3911
Matthew Wilcox08f36ff2018-01-17 07:14:13 -08003912static int __lock_is_held(const struct lockdep_map *lock, int read)
Peter Zijlstraf607c662009-07-20 19:16:29 +02003913{
3914 struct task_struct *curr = current;
3915 int i;
3916
3917 for (i = 0; i < curr->lockdep_depth; i++) {
Peter Zijlstrabb97a912009-07-20 19:15:35 +02003918 struct held_lock *hlock = curr->held_locks + i;
3919
Peter Zijlstraf8319482016-11-30 14:32:25 +11003920 if (match_held_lock(hlock, lock)) {
3921 if (read == -1 || hlock->read == read)
3922 return 1;
3923
3924 return 0;
3925 }
Peter Zijlstraf607c662009-07-20 19:16:29 +02003926 }
3927
3928 return 0;
3929}
3930
Peter Zijlstrae7904a22015-08-01 19:25:08 +02003931static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3932{
3933 struct pin_cookie cookie = NIL_COOKIE;
3934 struct task_struct *curr = current;
3935 int i;
3936
3937 if (unlikely(!debug_locks))
3938 return cookie;
3939
3940 for (i = 0; i < curr->lockdep_depth; i++) {
3941 struct held_lock *hlock = curr->held_locks + i;
3942
3943 if (match_held_lock(hlock, lock)) {
3944 /*
3945 * Grab 16bits of randomness; this is sufficient to not
3946 * be guessable and still allows some pin nesting in
3947 * our u32 pin_count.
3948 */
3949 cookie.val = 1 + (prandom_u32() >> 16);
3950 hlock->pin_count += cookie.val;
3951 return cookie;
3952 }
3953 }
3954
3955 WARN(1, "pinning an unheld lock\n");
3956 return cookie;
3957}
3958
3959static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
Peter Zijlstraa24fc602015-06-11 14:46:53 +02003960{
3961 struct task_struct *curr = current;
3962 int i;
3963
3964 if (unlikely(!debug_locks))
3965 return;
3966
3967 for (i = 0; i < curr->lockdep_depth; i++) {
3968 struct held_lock *hlock = curr->held_locks + i;
3969
3970 if (match_held_lock(hlock, lock)) {
Peter Zijlstrae7904a22015-08-01 19:25:08 +02003971 hlock->pin_count += cookie.val;
Peter Zijlstraa24fc602015-06-11 14:46:53 +02003972 return;
3973 }
3974 }
3975
3976 WARN(1, "pinning an unheld lock\n");
3977}
3978
Peter Zijlstrae7904a22015-08-01 19:25:08 +02003979static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
Peter Zijlstraa24fc602015-06-11 14:46:53 +02003980{
3981 struct task_struct *curr = current;
3982 int i;
3983
3984 if (unlikely(!debug_locks))
3985 return;
3986
3987 for (i = 0; i < curr->lockdep_depth; i++) {
3988 struct held_lock *hlock = curr->held_locks + i;
3989
3990 if (match_held_lock(hlock, lock)) {
3991 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3992 return;
3993
Peter Zijlstrae7904a22015-08-01 19:25:08 +02003994 hlock->pin_count -= cookie.val;
3995
3996 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3997 hlock->pin_count = 0;
3998
Peter Zijlstraa24fc602015-06-11 14:46:53 +02003999 return;
4000 }
4001 }
4002
4003 WARN(1, "unpinning an unheld lock\n");
4004}
4005
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004006/*
4007 * Check whether we follow the irq-flags state precisely:
4008 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02004009static void check_flags(unsigned long flags)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004010{
Ingo Molnar992860e2008-07-14 10:28:38 +02004011#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
4012 defined(CONFIG_TRACE_IRQFLAGS)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004013 if (!debug_locks)
4014 return;
4015
Ingo Molnar5f9fa8a2007-12-07 19:02:47 +01004016 if (irqs_disabled_flags(flags)) {
4017 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
4018 printk("possible reason: unannotated irqs-off.\n");
4019 }
4020 } else {
4021 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
4022 printk("possible reason: unannotated irqs-on.\n");
4023 }
4024 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004025
4026 /*
4027 * We dont accurately track softirq state in e.g.
4028 * hardirq contexts (such as on 4KSTACKS), so only
4029 * check if not in hardirq contexts:
4030 */
4031 if (!hardirq_count()) {
Peter Zijlstra0119fee2011-09-02 01:30:29 +02004032 if (softirq_count()) {
4033 /* like the above, but with softirqs */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004034 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
Peter Zijlstra0119fee2011-09-02 01:30:29 +02004035 } else {
4036 /* lick the above, does it taste good? */
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004037 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
Peter Zijlstra0119fee2011-09-02 01:30:29 +02004038 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004039 }
4040
4041 if (!debug_locks)
4042 print_irqtrace_events(current);
4043#endif
4044}
4045
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01004046void lock_set_class(struct lockdep_map *lock, const char *name,
4047 struct lock_class_key *key, unsigned int subclass,
4048 unsigned long ip)
Peter Zijlstra64aa3482008-08-11 09:30:21 +02004049{
4050 unsigned long flags;
4051
4052 if (unlikely(current->lockdep_recursion))
4053 return;
4054
4055 raw_local_irq_save(flags);
4056 current->lockdep_recursion = 1;
4057 check_flags(flags);
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01004058 if (__lock_set_class(lock, name, key, subclass, ip))
Peter Zijlstra64aa3482008-08-11 09:30:21 +02004059 check_chain_key(current);
4060 current->lockdep_recursion = 0;
4061 raw_local_irq_restore(flags);
4062}
Peter Zijlstra00ef9f72008-12-04 09:00:17 +01004063EXPORT_SYMBOL_GPL(lock_set_class);
Peter Zijlstra64aa3482008-08-11 09:30:21 +02004064
J. R. Okajima6419c4a2017-02-03 01:38:17 +09004065void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
4066{
4067 unsigned long flags;
4068
4069 if (unlikely(current->lockdep_recursion))
4070 return;
4071
4072 raw_local_irq_save(flags);
4073 current->lockdep_recursion = 1;
4074 check_flags(flags);
4075 if (__lock_downgrade(lock, ip))
4076 check_chain_key(current);
4077 current->lockdep_recursion = 0;
4078 raw_local_irq_restore(flags);
4079}
4080EXPORT_SYMBOL_GPL(lock_downgrade);
4081
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004082/*
4083 * We are not always called with irqs disabled - do that here,
4084 * and also avoid lockdep recursion:
4085 */
Steven Rostedt1d09daa2008-05-12 21:20:55 +02004086void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
Peter Zijlstra7531e2f2008-08-11 09:30:24 +02004087 int trylock, int read, int check,
4088 struct lockdep_map *nest_lock, unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004089{
4090 unsigned long flags;
4091
4092 if (unlikely(current->lockdep_recursion))
4093 return;
4094
4095 raw_local_irq_save(flags);
4096 check_flags(flags);
4097
4098 current->lockdep_recursion = 1;
Frederic Weisbeckerdb2c4c72010-02-02 23:34:40 +01004099 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004100 __lock_acquire(lock, subclass, trylock, read, check,
Peter Zijlstra21199f22015-09-16 16:10:40 +02004101 irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004102 current->lockdep_recursion = 0;
4103 raw_local_irq_restore(flags);
4104}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004105EXPORT_SYMBOL_GPL(lock_acquire);
4106
Steven Rostedt1d09daa2008-05-12 21:20:55 +02004107void lock_release(struct lockdep_map *lock, int nested,
Steven Rostedt0764d232008-05-12 21:20:44 +02004108 unsigned long ip)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004109{
4110 unsigned long flags;
4111
4112 if (unlikely(current->lockdep_recursion))
4113 return;
4114
4115 raw_local_irq_save(flags);
4116 check_flags(flags);
4117 current->lockdep_recursion = 1;
Frederic Weisbecker93135432010-05-08 06:24:25 +02004118 trace_lock_release(lock, ip);
Peter Zijlstrae0f56fd2015-06-11 14:46:52 +02004119 if (__lock_release(lock, nested, ip))
4120 check_chain_key(current);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004121 current->lockdep_recursion = 0;
4122 raw_local_irq_restore(flags);
4123}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004124EXPORT_SYMBOL_GPL(lock_release);
4125
Matthew Wilcox08f36ff2018-01-17 07:14:13 -08004126int lock_is_held_type(const struct lockdep_map *lock, int read)
Peter Zijlstraf607c662009-07-20 19:16:29 +02004127{
4128 unsigned long flags;
4129 int ret = 0;
4130
4131 if (unlikely(current->lockdep_recursion))
Peter Zijlstraf2513cd2011-06-06 12:32:43 +02004132 return 1; /* avoid false negative lockdep_assert_held() */
Peter Zijlstraf607c662009-07-20 19:16:29 +02004133
4134 raw_local_irq_save(flags);
4135 check_flags(flags);
4136
4137 current->lockdep_recursion = 1;
Peter Zijlstraf8319482016-11-30 14:32:25 +11004138 ret = __lock_is_held(lock, read);
Peter Zijlstraf607c662009-07-20 19:16:29 +02004139 current->lockdep_recursion = 0;
4140 raw_local_irq_restore(flags);
4141
4142 return ret;
4143}
Peter Zijlstraf8319482016-11-30 14:32:25 +11004144EXPORT_SYMBOL_GPL(lock_is_held_type);
Peter Zijlstraf607c662009-07-20 19:16:29 +02004145
Peter Zijlstrae7904a22015-08-01 19:25:08 +02004146struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
Peter Zijlstraa24fc602015-06-11 14:46:53 +02004147{
Peter Zijlstrae7904a22015-08-01 19:25:08 +02004148 struct pin_cookie cookie = NIL_COOKIE;
Peter Zijlstraa24fc602015-06-11 14:46:53 +02004149 unsigned long flags;
4150
4151 if (unlikely(current->lockdep_recursion))
Peter Zijlstrae7904a22015-08-01 19:25:08 +02004152 return cookie;
Peter Zijlstraa24fc602015-06-11 14:46:53 +02004153
4154 raw_local_irq_save(flags);
4155 check_flags(flags);
4156
4157 current->lockdep_recursion = 1;
Peter Zijlstrae7904a22015-08-01 19:25:08 +02004158 cookie = __lock_pin_lock(lock);
Peter Zijlstraa24fc602015-06-11 14:46:53 +02004159 current->lockdep_recursion = 0;
4160 raw_local_irq_restore(flags);
Peter Zijlstrae7904a22015-08-01 19:25:08 +02004161
4162 return cookie;
Peter Zijlstraa24fc602015-06-11 14:46:53 +02004163}
4164EXPORT_SYMBOL_GPL(lock_pin_lock);
4165
Peter Zijlstrae7904a22015-08-01 19:25:08 +02004166void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
Peter Zijlstraa24fc602015-06-11 14:46:53 +02004167{
4168 unsigned long flags;
4169
4170 if (unlikely(current->lockdep_recursion))
4171 return;
4172
4173 raw_local_irq_save(flags);
4174 check_flags(flags);
4175
4176 current->lockdep_recursion = 1;
Peter Zijlstrae7904a22015-08-01 19:25:08 +02004177 __lock_repin_lock(lock, cookie);
4178 current->lockdep_recursion = 0;
4179 raw_local_irq_restore(flags);
4180}
4181EXPORT_SYMBOL_GPL(lock_repin_lock);
4182
4183void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4184{
4185 unsigned long flags;
4186
4187 if (unlikely(current->lockdep_recursion))
4188 return;
4189
4190 raw_local_irq_save(flags);
4191 check_flags(flags);
4192
4193 current->lockdep_recursion = 1;
4194 __lock_unpin_lock(lock, cookie);
Peter Zijlstraa24fc602015-06-11 14:46:53 +02004195 current->lockdep_recursion = 0;
4196 raw_local_irq_restore(flags);
4197}
4198EXPORT_SYMBOL_GPL(lock_unpin_lock);
4199
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004200#ifdef CONFIG_LOCK_STAT
4201static int
4202print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
4203 unsigned long ip)
4204{
4205 if (!debug_locks_off())
4206 return 0;
4207 if (debug_locks_silent)
4208 return 0;
4209
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004210 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004211 pr_warn("=================================\n");
4212 pr_warn("WARNING: bad contention detected!\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01004213 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004214 pr_warn("---------------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004215 pr_warn("%s/%d is trying to contend lock (",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07004216 curr->comm, task_pid_nr(curr));
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004217 print_lockdep_cache(lock);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004218 pr_cont(") at:\n");
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004219 print_ip_sym(ip);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004220 pr_warn("but there are no locks held!\n");
4221 pr_warn("\nother info that might help us debug this:\n");
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004222 lockdep_print_held_locks(curr);
4223
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004224 pr_warn("\nstack backtrace:\n");
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004225 dump_stack();
4226
4227 return 0;
4228}
4229
4230static void
4231__lock_contended(struct lockdep_map *lock, unsigned long ip)
4232{
4233 struct task_struct *curr = current;
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09004234 struct held_lock *hlock;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004235 struct lock_class_stats *stats;
4236 unsigned int depth;
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02004237 int i, contention_point, contending_point;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004238
4239 depth = curr->lockdep_depth;
Peter Zijlstra0119fee2011-09-02 01:30:29 +02004240 /*
4241 * Whee, we contended on this lock, except it seems we're not
4242 * actually trying to acquire anything much at all..
4243 */
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004244 if (DEBUG_LOCKS_WARN_ON(!depth))
4245 return;
4246
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09004247 hlock = find_held_lock(curr, lock, depth, &i);
4248 if (!hlock) {
4249 print_lock_contention_bug(curr, lock, ip);
4250 return;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004251 }
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004252
Peter Zijlstrabb97a912009-07-20 19:15:35 +02004253 if (hlock->instance != lock)
4254 return;
4255
Peter Zijlstra3365e7792009-10-09 10:12:41 +02004256 hlock->waittime_stamp = lockstat_clock();
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004257
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02004258 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4259 contending_point = lock_point(hlock_class(hlock)->contending_point,
4260 lock->ip);
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004261
Dave Jonesf82b2172008-08-11 09:30:23 +02004262 stats = get_lock_stats(hlock_class(hlock));
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02004263 if (contention_point < LOCKSTAT_POINTS)
4264 stats->contention_point[contention_point]++;
4265 if (contending_point < LOCKSTAT_POINTS)
4266 stats->contending_point[contending_point]++;
Peter Zijlstra96645672007-07-19 01:49:00 -07004267 if (lock->cpu != smp_processor_id())
4268 stats->bounces[bounce_contended + !!hlock->read]++;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004269}
4270
4271static void
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02004272__lock_acquired(struct lockdep_map *lock, unsigned long ip)
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004273{
4274 struct task_struct *curr = current;
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09004275 struct held_lock *hlock;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004276 struct lock_class_stats *stats;
4277 unsigned int depth;
Peter Zijlstra3365e7792009-10-09 10:12:41 +02004278 u64 now, waittime = 0;
Peter Zijlstra96645672007-07-19 01:49:00 -07004279 int i, cpu;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004280
4281 depth = curr->lockdep_depth;
Peter Zijlstra0119fee2011-09-02 01:30:29 +02004282 /*
4283 * Yay, we acquired ownership of this lock we didn't try to
4284 * acquire, how the heck did that happen?
4285 */
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004286 if (DEBUG_LOCKS_WARN_ON(!depth))
4287 return;
4288
J. R. Okajima41c2c5b2017-02-03 01:38:15 +09004289 hlock = find_held_lock(curr, lock, depth, &i);
4290 if (!hlock) {
4291 print_lock_contention_bug(curr, lock, _RET_IP_);
4292 return;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004293 }
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004294
Peter Zijlstrabb97a912009-07-20 19:15:35 +02004295 if (hlock->instance != lock)
4296 return;
4297
Peter Zijlstra96645672007-07-19 01:49:00 -07004298 cpu = smp_processor_id();
4299 if (hlock->waittime_stamp) {
Peter Zijlstra3365e7792009-10-09 10:12:41 +02004300 now = lockstat_clock();
Peter Zijlstra96645672007-07-19 01:49:00 -07004301 waittime = now - hlock->waittime_stamp;
4302 hlock->holdtime_stamp = now;
4303 }
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004304
Frederic Weisbecker883a2a32010-05-08 06:16:11 +02004305 trace_lock_acquired(lock, ip);
Frederic Weisbecker20625012009-04-06 01:49:33 +02004306
Dave Jonesf82b2172008-08-11 09:30:23 +02004307 stats = get_lock_stats(hlock_class(hlock));
Peter Zijlstra96645672007-07-19 01:49:00 -07004308 if (waittime) {
4309 if (hlock->read)
4310 lock_time_inc(&stats->read_waittime, waittime);
4311 else
4312 lock_time_inc(&stats->write_waittime, waittime);
4313 }
4314 if (lock->cpu != cpu)
4315 stats->bounces[bounce_acquired + !!hlock->read]++;
Peter Zijlstra96645672007-07-19 01:49:00 -07004316
4317 lock->cpu = cpu;
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02004318 lock->ip = ip;
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004319}
4320
4321void lock_contended(struct lockdep_map *lock, unsigned long ip)
4322{
4323 unsigned long flags;
4324
Waiman Long9506a742018-10-18 21:45:17 -04004325 if (unlikely(!lock_stat || !debug_locks))
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004326 return;
4327
4328 if (unlikely(current->lockdep_recursion))
4329 return;
4330
4331 raw_local_irq_save(flags);
4332 check_flags(flags);
4333 current->lockdep_recursion = 1;
Frederic Weisbeckerdb2c4c72010-02-02 23:34:40 +01004334 trace_lock_contended(lock, ip);
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004335 __lock_contended(lock, ip);
4336 current->lockdep_recursion = 0;
4337 raw_local_irq_restore(flags);
4338}
4339EXPORT_SYMBOL_GPL(lock_contended);
4340
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02004341void lock_acquired(struct lockdep_map *lock, unsigned long ip)
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004342{
4343 unsigned long flags;
4344
Waiman Long9506a742018-10-18 21:45:17 -04004345 if (unlikely(!lock_stat || !debug_locks))
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004346 return;
4347
4348 if (unlikely(current->lockdep_recursion))
4349 return;
4350
4351 raw_local_irq_save(flags);
4352 check_flags(flags);
4353 current->lockdep_recursion = 1;
Peter Zijlstrac7e78cf2008-10-16 23:17:09 +02004354 __lock_acquired(lock, ip);
Peter Zijlstraf20786f2007-07-19 01:48:56 -07004355 current->lockdep_recursion = 0;
4356 raw_local_irq_restore(flags);
4357}
4358EXPORT_SYMBOL_GPL(lock_acquired);
4359#endif
4360
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004361/*
4362 * Used by the testsuite, sanitize the validator state
4363 * after a simulated failure:
4364 */
4365
4366void lockdep_reset(void)
4367{
4368 unsigned long flags;
Ingo Molnar23d95a02006-12-13 00:34:40 -08004369 int i;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004370
4371 raw_local_irq_save(flags);
4372 current->curr_chain_key = 0;
4373 current->lockdep_depth = 0;
4374 current->lockdep_recursion = 0;
4375 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4376 nr_hardirq_chains = 0;
4377 nr_softirq_chains = 0;
4378 nr_process_chains = 0;
4379 debug_locks = 1;
Ingo Molnar23d95a02006-12-13 00:34:40 -08004380 for (i = 0; i < CHAINHASH_SIZE; i++)
Andrew Mortona63f38c2016-02-03 13:44:12 -08004381 INIT_HLIST_HEAD(chainhash_table + i);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004382 raw_local_irq_restore(flags);
4383}
4384
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004385/* Remove a class from a lock chain. Must be called with the graph lock held. */
Bart Van Asschede4643a2019-02-14 15:00:50 -08004386static void remove_class_from_lock_chain(struct pending_free *pf,
4387 struct lock_chain *chain,
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004388 struct lock_class *class)
4389{
4390#ifdef CONFIG_PROVE_LOCKING
4391 struct lock_chain *new_chain;
4392 u64 chain_key;
4393 int i;
4394
4395 for (i = chain->base; i < chain->base + chain->depth; i++) {
4396 if (chain_hlocks[i] != class - lock_classes)
4397 continue;
4398 /* The code below leaks one chain_hlock[] entry. */
4399 if (--chain->depth > 0)
4400 memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
4401 (chain->base + chain->depth - i) *
4402 sizeof(chain_hlocks[0]));
4403 /*
4404 * Each lock class occurs at most once in a lock chain so once
4405 * we found a match we can break out of this loop.
4406 */
4407 goto recalc;
4408 }
4409 /* Since the chain has not been modified, return. */
4410 return;
4411
4412recalc:
4413 chain_key = 0;
4414 for (i = chain->base; i < chain->base + chain->depth; i++)
4415 chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
4416 if (chain->depth && chain->chain_key == chain_key)
4417 return;
4418 /* Overwrite the chain key for concurrent RCU readers. */
4419 WRITE_ONCE(chain->chain_key, chain_key);
4420 /*
4421 * Note: calling hlist_del_rcu() from inside a
4422 * hlist_for_each_entry_rcu() loop is safe.
4423 */
4424 hlist_del_rcu(&chain->entry);
Bart Van Asschede4643a2019-02-14 15:00:50 -08004425 __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004426 if (chain->depth == 0)
4427 return;
4428 /*
4429 * If the modified lock chain matches an existing lock chain, drop
4430 * the modified lock chain.
4431 */
4432 if (lookup_chain_cache(chain_key))
4433 return;
Bart Van Asschede4643a2019-02-14 15:00:50 -08004434 new_chain = alloc_lock_chain();
4435 if (WARN_ON_ONCE(!new_chain)) {
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004436 debug_locks_off();
4437 return;
4438 }
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004439 *new_chain = *chain;
4440 hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
4441#endif
4442}
4443
4444/* Must be called with the graph lock held. */
Bart Van Asschede4643a2019-02-14 15:00:50 -08004445static void remove_class_from_lock_chains(struct pending_free *pf,
4446 struct lock_class *class)
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004447{
4448 struct lock_chain *chain;
4449 struct hlist_head *head;
4450 int i;
4451
4452 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
4453 head = chainhash_table + i;
4454 hlist_for_each_entry_rcu(chain, head, entry) {
Bart Van Asschede4643a2019-02-14 15:00:50 -08004455 remove_class_from_lock_chain(pf, chain, class);
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004456 }
4457 }
4458}
4459
Bart Van Assche786fa292018-12-06 17:11:36 -08004460/*
4461 * Remove all references to a lock class. The caller must hold the graph lock.
4462 */
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004463static void zap_class(struct pending_free *pf, struct lock_class *class)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004464{
Bart Van Assche86cffb82019-02-14 15:00:41 -08004465 struct lock_list *entry;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004466 int i;
4467
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004468 WARN_ON_ONCE(!class->key);
4469
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004470 /*
4471 * Remove all dependencies this lock is
4472 * involved in:
4473 */
Bart Van Asscheace35a72019-02-14 15:00:47 -08004474 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
4475 entry = list_entries + i;
Bart Van Assche86cffb82019-02-14 15:00:41 -08004476 if (entry->class != class && entry->links_to != class)
4477 continue;
Bart Van Asscheace35a72019-02-14 15:00:47 -08004478 __clear_bit(i, list_entries_in_use);
4479 nr_list_entries--;
Bart Van Assche86cffb82019-02-14 15:00:41 -08004480 list_del_rcu(&entry->entry);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004481 }
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004482 if (list_empty(&class->locks_after) &&
4483 list_empty(&class->locks_before)) {
4484 list_move_tail(&class->lock_entry, &pf->zapped);
4485 hlist_del_rcu(&class->hash_entry);
4486 WRITE_ONCE(class->key, NULL);
4487 WRITE_ONCE(class->name, NULL);
4488 nr_lock_classes--;
4489 } else {
4490 WARN_ONCE(true, "%s() failed for class %s\n", __func__,
4491 class->name);
4492 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004493
Bart Van Asschede4643a2019-02-14 15:00:50 -08004494 remove_class_from_lock_chains(pf, class);
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004495}
4496
4497static void reinit_class(struct lock_class *class)
4498{
4499 void *const p = class;
4500 const unsigned int offset = offsetof(struct lock_class, key);
4501
4502 WARN_ON_ONCE(!class->lock_entry.next);
4503 WARN_ON_ONCE(!list_empty(&class->locks_after));
4504 WARN_ON_ONCE(!list_empty(&class->locks_before));
4505 memset(p + offset, 0, sizeof(*class) - offset);
4506 WARN_ON_ONCE(!class->lock_entry.next);
4507 WARN_ON_ONCE(!list_empty(&class->locks_after));
4508 WARN_ON_ONCE(!list_empty(&class->locks_before));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004509}
4510
Arjan van de Venfabe8742008-01-24 07:00:45 +01004511static inline int within(const void *addr, void *start, unsigned long size)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004512{
4513 return addr >= start && addr < start + size;
4514}
4515
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004516static bool inside_selftest(void)
4517{
4518 return current == lockdep_selftest_task_struct;
4519}
4520
4521/* The caller must hold the graph lock. */
4522static struct pending_free *get_pending_free(void)
4523{
4524 return delayed_free.pf + delayed_free.index;
4525}
4526
4527static void free_zapped_rcu(struct rcu_head *cb);
4528
4529/*
4530 * Schedule an RCU callback if no RCU callback is pending. Must be called with
4531 * the graph lock held.
4532 */
4533static void call_rcu_zapped(struct pending_free *pf)
4534{
4535 WARN_ON_ONCE(inside_selftest());
4536
4537 if (list_empty(&pf->zapped))
4538 return;
4539
4540 if (delayed_free.scheduled)
4541 return;
4542
4543 delayed_free.scheduled = true;
4544
4545 WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
4546 delayed_free.index ^= 1;
4547
4548 call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
4549}
4550
4551/* The caller must hold the graph lock. May be called from RCU context. */
4552static void __free_zapped_classes(struct pending_free *pf)
4553{
4554 struct lock_class *class;
4555
Bart Van Asscheb526b2e2019-02-14 15:00:51 -08004556 if (check_data_structure_consistency)
4557 WARN_ON_ONCE(!check_data_structures());
4558
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004559 list_for_each_entry(class, &pf->zapped, lock_entry)
4560 reinit_class(class);
4561
4562 list_splice_init(&pf->zapped, &free_lock_classes);
Bart Van Asschede4643a2019-02-14 15:00:50 -08004563
4564#ifdef CONFIG_PROVE_LOCKING
4565 bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
4566 pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
4567 bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
4568#endif
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004569}
4570
4571static void free_zapped_rcu(struct rcu_head *ch)
4572{
4573 struct pending_free *pf;
4574 unsigned long flags;
4575
4576 if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
4577 return;
4578
4579 raw_local_irq_save(flags);
4580 if (!graph_lock())
4581 goto out_irq;
4582
4583 /* closed head */
4584 pf = delayed_free.pf + (delayed_free.index ^ 1);
4585 __free_zapped_classes(pf);
4586 delayed_free.scheduled = false;
4587
4588 /*
4589 * If there's anything on the open list, close and start a new callback.
4590 */
4591 call_rcu_zapped(delayed_free.pf + delayed_free.index);
4592
4593 graph_unlock();
4594out_irq:
4595 raw_local_irq_restore(flags);
4596}
4597
4598/*
4599 * Remove all lock classes from the class hash table and from the
4600 * all_lock_classes list whose key or name is in the address range [start,
4601 * start + size). Move these lock classes to the zapped_classes list. Must
4602 * be called with the graph lock held.
4603 */
4604static void __lockdep_free_key_range(struct pending_free *pf, void *start,
4605 unsigned long size)
Bart Van Assche956f3562019-02-14 15:00:43 -08004606{
4607 struct lock_class *class;
4608 struct hlist_head *head;
4609 int i;
4610
4611 /* Unhash all classes that were created by a module. */
4612 for (i = 0; i < CLASSHASH_SIZE; i++) {
4613 head = classhash_table + i;
4614 hlist_for_each_entry_rcu(class, head, hash_entry) {
4615 if (!within(class->key, start, size) &&
4616 !within(class->name, start, size))
4617 continue;
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004618 zap_class(pf, class);
Bart Van Assche956f3562019-02-14 15:00:43 -08004619 }
4620 }
4621}
4622
Peter Zijlstra35a93932015-02-26 16:23:11 +01004623/*
4624 * Used in module.c to remove lock classes from memory that is going to be
4625 * freed; and possibly re-used by other modules.
4626 *
Bart Van Assche29fc33f2019-02-14 15:00:45 -08004627 * We will have had one synchronize_rcu() before getting here, so we're
4628 * guaranteed nobody will look up these exact classes -- they're properly dead
4629 * but still allocated.
Peter Zijlstra35a93932015-02-26 16:23:11 +01004630 */
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004631static void lockdep_free_key_range_reg(void *start, unsigned long size)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004632{
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004633 struct pending_free *pf;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004634 unsigned long flags;
Nick Piggin5a26db52008-01-16 09:51:58 +01004635 int locked;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004636
Bart Van Asschefeb0a382019-02-14 15:00:42 -08004637 init_data_structures_once();
4638
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004639 raw_local_irq_save(flags);
Nick Piggin5a26db52008-01-16 09:51:58 +01004640 locked = graph_lock();
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004641 if (!locked)
4642 goto out_irq;
4643
4644 pf = get_pending_free();
4645 __lockdep_free_key_range(pf, start, size);
4646 call_rcu_zapped(pf);
4647
4648 graph_unlock();
4649out_irq:
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004650 raw_local_irq_restore(flags);
Peter Zijlstra35a93932015-02-26 16:23:11 +01004651
4652 /*
4653 * Wait for any possible iterators from look_up_lock_class() to pass
4654 * before continuing to free the memory they refer to.
Peter Zijlstra35a93932015-02-26 16:23:11 +01004655 */
Paul E. McKenney51959d82018-11-06 19:06:51 -08004656 synchronize_rcu();
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004657}
Peter Zijlstra35a93932015-02-26 16:23:11 +01004658
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004659/*
4660 * Free all lockdep keys in the range [start, start+size). Does not sleep.
4661 * Ignores debug_locks. Must only be used by the lockdep selftests.
4662 */
4663static void lockdep_free_key_range_imm(void *start, unsigned long size)
4664{
4665 struct pending_free *pf = delayed_free.pf;
4666 unsigned long flags;
4667
4668 init_data_structures_once();
4669
4670 raw_local_irq_save(flags);
4671 arch_spin_lock(&lockdep_lock);
4672 __lockdep_free_key_range(pf, start, size);
4673 __free_zapped_classes(pf);
4674 arch_spin_unlock(&lockdep_lock);
4675 raw_local_irq_restore(flags);
4676}
4677
4678void lockdep_free_key_range(void *start, unsigned long size)
4679{
4680 init_data_structures_once();
4681
4682 if (inside_selftest())
4683 lockdep_free_key_range_imm(start, size);
4684 else
4685 lockdep_free_key_range_reg(start, size);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004686}
4687
Bart Van Assche2904d9f2018-12-06 17:11:34 -08004688/*
4689 * Check whether any element of the @lock->class_cache[] array refers to a
4690 * registered lock class. The caller must hold either the graph lock or the
4691 * RCU read lock.
4692 */
4693static bool lock_class_cache_is_registered(struct lockdep_map *lock)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004694{
Peter Zijlstra35a93932015-02-26 16:23:11 +01004695 struct lock_class *class;
Andrew Mortona63f38c2016-02-03 13:44:12 -08004696 struct hlist_head *head;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004697 int i, j;
Bart Van Assche2904d9f2018-12-06 17:11:34 -08004698
4699 for (i = 0; i < CLASSHASH_SIZE; i++) {
4700 head = classhash_table + i;
4701 hlist_for_each_entry_rcu(class, head, hash_entry) {
4702 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4703 if (lock->class_cache[j] == class)
4704 return true;
4705 }
4706 }
4707 return false;
4708}
4709
Bart Van Assche956f3562019-02-14 15:00:43 -08004710/* The caller must hold the graph lock. Does not sleep. */
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004711static void __lockdep_reset_lock(struct pending_free *pf,
4712 struct lockdep_map *lock)
Bart Van Assche2904d9f2018-12-06 17:11:34 -08004713{
4714 struct lock_class *class;
Bart Van Assche956f3562019-02-14 15:00:43 -08004715 int j;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004716
4717 /*
Ingo Molnard6d897c2006-07-10 04:44:04 -07004718 * Remove all classes this lock might have:
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004719 */
Ingo Molnard6d897c2006-07-10 04:44:04 -07004720 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4721 /*
4722 * If the class exists we look it up and zap it:
4723 */
4724 class = look_up_lock_class(lock, j);
Matthew Wilcox64f29d12018-01-17 07:14:12 -08004725 if (class)
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004726 zap_class(pf, class);
Ingo Molnard6d897c2006-07-10 04:44:04 -07004727 }
4728 /*
4729 * Debug check: in the end all mapped classes should
4730 * be gone.
4731 */
Bart Van Assche956f3562019-02-14 15:00:43 -08004732 if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
4733 debug_locks_off();
4734}
4735
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004736/*
4737 * Remove all information lockdep has about a lock if debug_locks == 1. Free
4738 * released data structures from RCU context.
4739 */
4740static void lockdep_reset_lock_reg(struct lockdep_map *lock)
Bart Van Assche956f3562019-02-14 15:00:43 -08004741{
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004742 struct pending_free *pf;
Bart Van Assche956f3562019-02-14 15:00:43 -08004743 unsigned long flags;
4744 int locked;
4745
Bart Van Assche956f3562019-02-14 15:00:43 -08004746 raw_local_irq_save(flags);
4747 locked = graph_lock();
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004748 if (!locked)
4749 goto out_irq;
4750
4751 pf = get_pending_free();
4752 __lockdep_reset_lock(pf, lock);
4753 call_rcu_zapped(pf);
4754
4755 graph_unlock();
4756out_irq:
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004757 raw_local_irq_restore(flags);
4758}
4759
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004760/*
4761 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
4762 * lockdep selftests.
4763 */
4764static void lockdep_reset_lock_imm(struct lockdep_map *lock)
4765{
4766 struct pending_free *pf = delayed_free.pf;
4767 unsigned long flags;
4768
4769 raw_local_irq_save(flags);
4770 arch_spin_lock(&lockdep_lock);
4771 __lockdep_reset_lock(pf, lock);
4772 __free_zapped_classes(pf);
4773 arch_spin_unlock(&lockdep_lock);
4774 raw_local_irq_restore(flags);
4775}
4776
4777void lockdep_reset_lock(struct lockdep_map *lock)
4778{
4779 init_data_structures_once();
4780
4781 if (inside_selftest())
4782 lockdep_reset_lock_imm(lock);
4783 else
4784 lockdep_reset_lock_reg(lock);
4785}
4786
Joel Fernandes (Google)c3bc8fd2018-07-30 15:24:23 -07004787void __init lockdep_init(void)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004788{
4789 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4790
Li Zefanb0788ca2008-11-21 15:57:32 +08004791 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004792 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
4793 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
Li Zefanb0788ca2008-11-21 15:57:32 +08004794 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004795 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
4796 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
4797 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
4798
Bart Van Assche09d75ec2019-02-14 15:00:36 -08004799 printk(" memory used by lock dependency info: %zu kB\n",
Bart Van Assche7ff85172019-02-14 15:00:37 -08004800 (sizeof(lock_classes) +
4801 sizeof(classhash_table) +
4802 sizeof(list_entries) +
Bart Van Asscheace35a72019-02-14 15:00:47 -08004803 sizeof(list_entries_in_use) +
Bart Van Asschea0b0fd52019-02-14 15:00:46 -08004804 sizeof(chainhash_table) +
4805 sizeof(delayed_free)
Ming Lei4dd861d2009-07-16 15:44:29 +02004806#ifdef CONFIG_PROVE_LOCKING
Bart Van Assche7ff85172019-02-14 15:00:37 -08004807 + sizeof(lock_cq)
Bart Van Assche15ea86b2019-02-14 15:00:38 -08004808 + sizeof(lock_chains)
Bart Van Asschede4643a2019-02-14 15:00:50 -08004809 + sizeof(lock_chains_in_use)
Bart Van Assche15ea86b2019-02-14 15:00:38 -08004810 + sizeof(chain_hlocks)
Ming Lei4dd861d2009-07-16 15:44:29 +02004811#endif
Ming Lei906292092009-08-02 21:43:36 +08004812 ) / 1024
Ming Lei4dd861d2009-07-16 15:44:29 +02004813 );
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004814
Bart Van Assche09d75ec2019-02-14 15:00:36 -08004815 printk(" per task-struct memory footprint: %zu bytes\n",
Bart Van Assche7ff85172019-02-14 15:00:37 -08004816 sizeof(((struct task_struct *)NULL)->held_locks));
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004817}
4818
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004819static void
4820print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
Arjan van de Ven55794a42006-07-10 04:44:03 -07004821 const void *mem_to, struct held_lock *hlock)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004822{
4823 if (!debug_locks_off())
4824 return;
4825 if (debug_locks_silent)
4826 return;
4827
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004828 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004829 pr_warn("=========================\n");
4830 pr_warn("WARNING: held lock freed!\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01004831 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004832 pr_warn("-------------------------\n");
Borislav Petkov04860d42018-02-26 14:49:26 +01004833 pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07004834 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
Arjan van de Ven55794a42006-07-10 04:44:03 -07004835 print_lock(hlock);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004836 lockdep_print_held_locks(curr);
4837
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004838 pr_warn("\nstack backtrace:\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004839 dump_stack();
4840}
4841
Oleg Nesterov54561782007-12-05 15:46:09 +01004842static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4843 const void* lock_from, unsigned long lock_len)
4844{
4845 return lock_from + lock_len <= mem_from ||
4846 mem_from + mem_len <= lock_from;
4847}
4848
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004849/*
4850 * Called when kernel memory is freed (or unmapped), or if a lock
4851 * is destroyed or reinitialized - this code checks whether there is
4852 * any held lock in the memory range of <from> to <to>:
4853 */
4854void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4855{
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004856 struct task_struct *curr = current;
4857 struct held_lock *hlock;
4858 unsigned long flags;
4859 int i;
4860
4861 if (unlikely(!debug_locks))
4862 return;
4863
Steven Rostedt (VMware)fcc784b2018-04-04 14:06:30 -04004864 raw_local_irq_save(flags);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004865 for (i = 0; i < curr->lockdep_depth; i++) {
4866 hlock = curr->held_locks + i;
4867
Oleg Nesterov54561782007-12-05 15:46:09 +01004868 if (not_in_range(mem_from, mem_len, hlock->instance,
4869 sizeof(*hlock->instance)))
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004870 continue;
4871
Oleg Nesterov54561782007-12-05 15:46:09 +01004872 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004873 break;
4874 }
Steven Rostedt (VMware)fcc784b2018-04-04 14:06:30 -04004875 raw_local_irq_restore(flags);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004876}
Peter Zijlstraed075362006-12-06 20:35:24 -08004877EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004878
Colin Cross1b1d2fb2013-05-06 23:50:08 +00004879static void print_held_locks_bug(void)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004880{
4881 if (!debug_locks_off())
4882 return;
4883 if (debug_locks_silent)
4884 return;
4885
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004886 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004887 pr_warn("====================================\n");
4888 pr_warn("WARNING: %s/%d still has locks held!\n",
Colin Cross1b1d2fb2013-05-06 23:50:08 +00004889 current->comm, task_pid_nr(current));
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01004890 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004891 pr_warn("------------------------------------\n");
Colin Cross1b1d2fb2013-05-06 23:50:08 +00004892 lockdep_print_held_locks(current);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004893 pr_warn("\nstack backtrace:\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004894 dump_stack();
4895}
4896
Colin Cross1b1d2fb2013-05-06 23:50:08 +00004897void debug_check_no_locks_held(void)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004898{
Colin Cross1b1d2fb2013-05-06 23:50:08 +00004899 if (unlikely(current->lockdep_depth > 0))
4900 print_held_locks_bug();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004901}
Colin Cross1b1d2fb2013-05-06 23:50:08 +00004902EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004903
Sasha Levin8dce7a92013-06-13 18:41:16 -04004904#ifdef __KERNEL__
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004905void debug_show_all_locks(void)
4906{
4907 struct task_struct *g, *p;
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004908
Jarek Poplawski9c35dd72007-03-22 00:11:28 -08004909 if (unlikely(!debug_locks)) {
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004910 pr_warn("INFO: lockdep is turned off.\n");
Jarek Poplawski9c35dd72007-03-22 00:11:28 -08004911 return;
4912 }
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004913 pr_warn("\nShowing all locks held in the system:\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004914
Tetsuo Handa0f736a52018-04-06 19:41:18 +09004915 rcu_read_lock();
4916 for_each_process_thread(g, p) {
Tetsuo Handa0f736a52018-04-06 19:41:18 +09004917 if (!p->lockdep_depth)
4918 continue;
4919 lockdep_print_held_locks(p);
Tejun Heo88f1c872018-01-22 14:00:55 -08004920 touch_nmi_watchdog();
Tetsuo Handa0f736a52018-04-06 19:41:18 +09004921 touch_all_softlockup_watchdogs();
4922 }
4923 rcu_read_unlock();
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004924
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004925 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004926 pr_warn("=============================================\n\n");
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004927}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004928EXPORT_SYMBOL_GPL(debug_show_all_locks);
Sasha Levin8dce7a92013-06-13 18:41:16 -04004929#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004930
Ingo Molnar82a1fcb2008-01-25 21:08:02 +01004931/*
4932 * Careful: only use this function if you are sure that
4933 * the task cannot run in parallel!
4934 */
John Kacurf1b499f2010-08-05 17:10:53 +02004935void debug_show_held_locks(struct task_struct *task)
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004936{
Jarek Poplawski9c35dd72007-03-22 00:11:28 -08004937 if (unlikely(!debug_locks)) {
4938 printk("INFO: lockdep is turned off.\n");
4939 return;
4940 }
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004941 lockdep_print_held_locks(task);
4942}
Ingo Molnarfbb9ce952006-07-03 00:24:50 -07004943EXPORT_SYMBOL_GPL(debug_show_held_locks);
Peter Zijlstrab351d162007-10-11 22:11:12 +02004944
Andi Kleen722a9f92014-05-02 00:44:38 +02004945asmlinkage __visible void lockdep_sys_exit(void)
Peter Zijlstrab351d162007-10-11 22:11:12 +02004946{
4947 struct task_struct *curr = current;
4948
4949 if (unlikely(curr->lockdep_depth)) {
4950 if (!debug_locks_off())
4951 return;
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004952 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004953 pr_warn("================================================\n");
4954 pr_warn("WARNING: lock held when returning to user space!\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01004955 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004956 pr_warn("------------------------------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004957 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
Peter Zijlstrab351d162007-10-11 22:11:12 +02004958 curr->comm, curr->pid);
4959 lockdep_print_held_locks(curr);
4960 }
Byungchul Parkb09be672017-08-07 16:12:52 +09004961
4962 /*
4963 * The lock history for each syscall should be independent. So wipe the
4964 * slate clean on return to userspace.
4965 */
Peter Zijlstraf52be572017-08-29 10:59:39 +02004966 lockdep_invariant_state(false);
Peter Zijlstrab351d162007-10-11 22:11:12 +02004967}
Paul E. McKenney0632eb32010-02-22 17:04:47 -08004968
Paul E. McKenneyb3fbab02011-05-24 08:31:09 -07004969void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
Paul E. McKenney0632eb32010-02-22 17:04:47 -08004970{
4971 struct task_struct *curr = current;
4972
Lai Jiangshan2b3fc352010-04-20 16:23:07 +08004973 /* Note: the following can be executed concurrently, so be careful. */
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004974 pr_warn("\n");
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004975 pr_warn("=============================\n");
4976 pr_warn("WARNING: suspicious RCU usage\n");
Ben Hutchingsfbdc4b92011-10-28 04:36:55 +01004977 print_kernel_ident();
Paul E. McKenneya5dd63e2017-01-31 07:45:13 -08004978 pr_warn("-----------------------------\n");
Paul E. McKenney681fbec2017-05-04 15:44:38 -07004979 pr_warn("%s:%d %s!\n", file, line, s);
4980 pr_warn("\nother info that might help us debug this:\n\n");
4981 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
Paul E. McKenneyc5fdcec2012-01-30 08:46:32 -08004982 !rcu_lockdep_current_cpu_online()
4983 ? "RCU used illegally from offline CPU!\n"
Paul E. McKenney5c173eb2013-09-13 17:20:11 -07004984 : !rcu_is_watching()
Paul E. McKenneyc5fdcec2012-01-30 08:46:32 -08004985 ? "RCU used illegally from idle CPU!\n"
4986 : "",
4987 rcu_scheduler_active, debug_locks);
Frederic Weisbecker0464e932011-10-07 18:22:01 +02004988
4989 /*
4990 * If a CPU is in the RCU-free window in idle (ie: in the section
4991 * between rcu_idle_enter() and rcu_idle_exit(), then RCU
4992 * considers that CPU to be in an "extended quiescent state",
4993 * which means that RCU will be completely ignoring that CPU.
4994 * Therefore, rcu_read_lock() and friends have absolutely no
4995 * effect on a CPU running in that state. In other words, even if
4996 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4997 * delete data structures out from under it. RCU really has no
4998 * choice here: we need to keep an RCU-free window in idle where
4999 * the CPU may possibly enter into low power mode. This way we can
5000 * notice an extended quiescent state to other CPUs that started a grace
5001 * period. Otherwise we would delay any grace period as long as we run
5002 * in the idle task.
5003 *
5004 * So complain bitterly if someone does call rcu_read_lock(),
5005 * rcu_read_lock_bh() and so on from extended quiescent states.
5006 */
Paul E. McKenney5c173eb2013-09-13 17:20:11 -07005007 if (!rcu_is_watching())
Paul E. McKenney681fbec2017-05-04 15:44:38 -07005008 pr_warn("RCU used illegally from extended quiescent state!\n");
Frederic Weisbecker0464e932011-10-07 18:22:01 +02005009
Paul E. McKenney0632eb32010-02-22 17:04:47 -08005010 lockdep_print_held_locks(curr);
Paul E. McKenney681fbec2017-05-04 15:44:38 -07005011 pr_warn("\nstack backtrace:\n");
Paul E. McKenney0632eb32010-02-22 17:04:47 -08005012 dump_stack();
5013}
Paul E. McKenneyb3fbab02011-05-24 08:31:09 -07005014EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);