Lockdep: add lockdep_set_class_and_subclass() and lockdep_set_subclass()
[linux-2.6.git] / lib / spinlock_debug.c
1 /*
2  * Copyright 2005, Red Hat, Inc., Ingo Molnar
3  * Released under the General Public License (GPL).
4  *
5  * This file contains the spinlock/rwlock implementations for
6  * DEBUG_SPINLOCK.
7  */
8
9 #include <linux/spinlock.h>
10 #include <linux/interrupt.h>
11 #include <linux/debug_locks.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14
/*
 * Initialize a spinlock for the DEBUG_SPINLOCK build: set up the
 * underlying raw lock and the debug-tracking fields.  @name and @key
 * are handed to lockdep via lockdep_init_map() when lock debugging
 * allocation tracking is configured.
 */
void __spin_lock_init(spinlock_t *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;		/* checked by SPIN_BUG_ON() later */
	lock->owner = SPINLOCK_OWNER_INIT;	/* no owning task yet */
	lock->owner_cpu = -1;			/* no owning CPU yet */
}

EXPORT_SYMBOL(__spin_lock_init);
32
/*
 * Initialize an rwlock for the DEBUG_SPINLOCK build: set up the raw
 * rwlock and the debug-tracking fields.  @name and @key are handed to
 * lockdep via lockdep_init_map() when CONFIG_DEBUG_LOCK_ALLOC is set.
 */
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;		/* checked by RWLOCK_BUG_ON() later */
	lock->owner = SPINLOCK_OWNER_INIT;	/* no owning (write) task yet */
	lock->owner_cpu = -1;			/* no owning CPU yet */
}

EXPORT_SYMBOL(__rwlock_init);
50
51 static void spin_bug(spinlock_t *lock, const char *msg)
52 {
53         struct task_struct *owner = NULL;
54
55         if (!debug_locks_off())
56                 return;
57
58         if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
59                 owner = lock->owner;
60         printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
61                 msg, raw_smp_processor_id(),
62                 current->comm, current->pid);
63         printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
64                         ".owner_cpu: %d\n",
65                 lock, lock->magic,
66                 owner ? owner->comm : "<none>",
67                 owner ? owner->pid : -1,
68                 lock->owner_cpu);
69         dump_stack();
70 }
71
/*
 * If @cond holds, report a spinlock debug violation via spin_bug().
 * Wrapped in do/while(0) so the macro expands to exactly one statement
 * and cannot misbind in an unbraced if/else at a call site.
 */
#define SPIN_BUG_ON(cond, lock, msg)					\
	do { if (unlikely(cond)) spin_bug(lock, msg); } while (0)
73
/*
 * Sanity checks before taking a spinlock: the magic must be intact
 * (lock was initialized and not corrupted/freed), and neither the
 * current task nor the current CPU may already own it — either would
 * be a self-deadlocking recursive acquisition.
 */
static inline void
debug_spin_lock_before(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
82
/*
 * Record ownership right after the raw lock was acquired, so later
 * recursion / wrong-owner / wrong-CPU checks can identify the holder.
 */
static inline void debug_spin_lock_after(spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
88
/*
 * Sanity checks before releasing a spinlock: magic intact, lock
 * actually held, and being released by the same task and CPU that
 * acquired it.  Afterwards the ownership info is reset to the
 * "unowned" values set by __spin_lock_init().
 */
static inline void debug_spin_unlock(spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
99
100 static void __spin_lock_debug(spinlock_t *lock)
101 {
102         u64 i;
103         u64 loops = loops_per_jiffy * HZ;
104         int print_once = 1;
105
106         for (;;) {
107                 for (i = 0; i < loops; i++) {
108                         if (__raw_spin_trylock(&lock->raw_lock))
109                                 return;
110                         __delay(1);
111                 }
112                 /* lockup suspected: */
113                 if (print_once) {
114                         print_once = 0;
115                         printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
116                                         "%s/%d, %p\n",
117                                 raw_smp_processor_id(), current->comm,
118                                 current->pid, lock);
119                         dump_stack();
120                 }
121         }
122 }
123
/*
 * Debug spin_lock entry point: run the pre-acquire misuse checks, try
 * the uncontended fast path, fall back to the lockup-detecting spin
 * loop on contention, and finally record the new owner.
 */
void _raw_spin_lock(spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
131
132 int _raw_spin_trylock(spinlock_t *lock)
133 {
134         int ret = __raw_spin_trylock(&lock->raw_lock);
135
136         if (ret)
137                 debug_spin_lock_after(lock);
138 #ifndef CONFIG_SMP
139         /*
140          * Must not happen on UP:
141          */
142         SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
143 #endif
144         return ret;
145 }
146
/*
 * Debug spin_unlock: validate ownership and clear the debug state
 * *before* dropping the raw lock, while this CPU still holds it.
 */
void _raw_spin_unlock(spinlock_t *lock)
{
	debug_spin_unlock(lock);
	__raw_spin_unlock(&lock->raw_lock);
}
152
153 static void rwlock_bug(rwlock_t *lock, const char *msg)
154 {
155         if (!debug_locks_off())
156                 return;
157
158         printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
159                 msg, raw_smp_processor_id(), current->comm,
160                 current->pid, lock);
161         dump_stack();
162 }
163
/*
 * If @cond holds, report an rwlock debug violation via rwlock_bug().
 * Wrapped in do/while(0) so the macro expands to exactly one statement
 * and cannot misbind in an unbraced if/else at a call site.
 */
#define RWLOCK_BUG_ON(cond, lock, msg)					\
	do { if (unlikely(cond)) rwlock_bug(lock, msg); } while (0)
165
#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
/*
 * Disabled read-lock spin loop with lockup detection: retry
 * __raw_read_trylock() with __delay(1) between attempts; after
 * loops_per_jiffy * HZ failed attempts, print a one-time lockup
 * report with a stack dump, then keep spinning.  Compiled out — see
 * the #if 0 comment above and the matching note on
 * __write_lock_debug() below.
 */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (__raw_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
191
/*
 * Debug read_lock: only the magic value is sanity-checked — no owner
 * tracking is done for readers (the owner fields are only maintained
 * by the write-side paths).
 */
void _raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	__raw_read_lock(&lock->raw_lock);
}
197
198 int _raw_read_trylock(rwlock_t *lock)
199 {
200         int ret = __raw_read_trylock(&lock->raw_lock);
201
202 #ifndef CONFIG_SMP
203         /*
204          * Must not happen on UP:
205          */
206         RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
207 #endif
208         return ret;
209 }
210
/*
 * Debug read_unlock: only the magic value is sanity-checked, matching
 * _raw_read_lock() — readers carry no ownership state to clear.
 */
void _raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	__raw_read_unlock(&lock->raw_lock);
}
216
/*
 * Sanity checks before taking the write side of an rwlock: magic must
 * be intact, and neither the current task nor the current CPU may
 * already hold the write lock — either would be a self-deadlocking
 * recursive acquisition.
 */
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}
224
/*
 * Record write-side ownership right after the raw lock was acquired,
 * so later recursion / wrong-owner / wrong-CPU checks can identify
 * the holder.
 */
static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}
230
/*
 * Sanity checks before releasing the write side of an rwlock: magic
 * intact, and released by the same task and CPU that acquired it.
 * Afterwards the ownership info is reset to the "unowned" values set
 * by __rwlock_init().
 */
static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
240
#if 0		/* This can cause lockups */
/*
 * Disabled write-lock spin loop with lockup detection: retry
 * __raw_write_trylock() with __delay(1) between attempts; after
 * loops_per_jiffy * HZ failed attempts, print a one-time lockup
 * report with a stack dump, then keep spinning.  Compiled out because
 * the loop itself can cause lockups (see the #if 0 comment above).
 */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (__raw_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif
266
/*
 * Debug write_lock: run the pre-acquire misuse checks, take the raw
 * write lock directly (no lockup-detecting loop — __write_lock_debug()
 * above is compiled out), then record ownership.
 */
void _raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	__raw_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}
273
274 int _raw_write_trylock(rwlock_t *lock)
275 {
276         int ret = __raw_write_trylock(&lock->raw_lock);
277
278         if (ret)
279                 debug_write_lock_after(lock);
280 #ifndef CONFIG_SMP
281         /*
282          * Must not happen on UP:
283          */
284         RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
285 #endif
286         return ret;
287 }
288
/*
 * Debug write_unlock: validate ownership and clear the debug state
 * *before* dropping the raw lock, while this CPU still holds it.
 */
void _raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	__raw_write_unlock(&lock->raw_lock);
}