[linux-2.6.git] / kernel / spinlock.c
1 /*
2  * Copyright (2004) Linus Torvalds
3  *
4  * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
5  *
6  * Copyright (2004, 2005) Ingo Molnar
7  *
8  * This file contains the spinlock/rwlock implementations for the
9  * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
10  */
11
12 #include <linux/linkage.h>
13 #include <linux/preempt.h>
14 #include <linux/spinlock.h>
15 #include <linux/interrupt.h>
16 #include <linux/debug_locks.h>
17 #include <linux/module.h>
18
19 /*
20  * Generic declaration of the raw read_trylock() function,
21  * architectures are supposed to optimize this:
22  */
/*
 * Fallback "trylock" for architectures without a real raw read-trylock:
 * it unconditionally takes the read lock (possibly spinning) and then
 * reports success.  Archs should provide an optimized version.
 */
int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
{
	__raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic__raw_read_trylock);
29
/*
 * Try to take @lock without spinning.  Returns 1 on success (caller
 * holds the lock, preemption disabled), 0 on failure.
 */
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	/* Disable preemption first so success leaves us non-preemptible. */
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		/* Record the acquire with lockdep (trylock flag == 1). */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	/* Failed: undo the preempt_disable() and report failure. */
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);
42
/*
 * Try to take @lock for reading without spinning.  Returns 1 on success
 * (read lock held, preemption disabled), 0 on failure.
 */
int __lockfunc _read_trylock(rwlock_t *lock)
{
	/* Disable preemption first so success leaves us non-preemptible. */
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		/* Record a read acquire with lockdep (trylock flag == 1). */
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	/* Failed: undo the preempt_disable() and report failure. */
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);
55
/*
 * Try to take @lock for writing without spinning.  Returns 1 on success
 * (write lock held, preemption disabled), 0 on failure.
 */
int __lockfunc _write_trylock(rwlock_t *lock)
{
	/* Disable preemption first so success leaves us non-preemptible. */
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		/* Record a write acquire with lockdep (trylock flag == 1). */
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	/* Failed: undo the preempt_disable() and report failure. */
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);
68
69 /*
70  * If lockdep is enabled then we use the non-preemption spin-ops
71  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
72  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
73  */
74 #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
75         defined(CONFIG_PROVE_LOCKING)
76
/*
 * Take @lock for reading.  Preemption stays disabled until the matching
 * _read_unlock*().  Lockdep is told about the acquire before we spin,
 * so deadlocks are reported even if we never get the lock.
 */
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
84
/*
 * Take @lock, disabling local interrupts first.  Returns the previous
 * interrupt state for the matching _spin_unlock_irqrestore().
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	/* irqs off before preemption-off, then tell lockdep. */
	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we dont want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_PROVE_LOCKING
	_raw_spin_lock(lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
105
/*
 * Take @lock with local interrupts unconditionally disabled (the prior
 * irq state is NOT saved - use _spin_lock_irqsave() if it must be).
 */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
114
/*
 * Take @lock with softirq (bottom-half) processing disabled on this CPU.
 */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
123
/*
 * Take @lock for reading with local interrupts disabled; returns the
 * previous irq state for the matching _read_unlock_irqrestore().
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
135
/*
 * Take @lock for reading with local interrupts unconditionally disabled
 * (prior irq state is not saved).
 */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
144
/*
 * Take @lock for reading with softirq processing disabled on this CPU.
 */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
153
/*
 * Take @lock for writing with local interrupts disabled; returns the
 * previous irq state for the matching _write_unlock_irqrestore().
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
165
/*
 * Take @lock for writing with local interrupts unconditionally disabled
 * (prior irq state is not saved).
 */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
174
/*
 * Take @lock for writing with softirq processing disabled on this CPU.
 */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
183
/*
 * Take @lock; preemption stays disabled until _spin_unlock().  Lockdep
 * is informed of the acquire before we potentially spin.
 */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock);
192
/*
 * Take @lock for writing; preemption stays disabled until
 * _write_unlock().
 */
void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}

EXPORT_SYMBOL(_write_lock);
201
202 #else /* CONFIG_PREEMPT: */
203
204 /*
205  * This could be a long-held lock. We both prepare to spin for a long
206  * time (making _this_ CPU preemptable if possible), and we also signal
207  * towards that other CPU that it should break the lock ASAP.
208  *
209  * (We do this in a function because inlining it would be excessive.)
210  */
211
/*
 * Expands to the preemption-friendly _lock, _lock_irqsave, _lock_irq and
 * _lock_bh variants for one lock flavour (op = spin/read/write,
 * locktype = spinlock/rwlock).  The lock is only ever taken via trylock
 * with preemption disabled; between attempts we spin with preemption
 * (and, for irqsave, interrupts) re-enabled, and set ->break_lock to ask
 * the current holder to release ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)                                    \
void __lockfunc _##op##_lock(locktype##_t *lock)                        \
{                                                                       \
	for (;;) {                                                      \
		preempt_disable();                                      \
		if (likely(_raw_##op##_trylock(lock)))                  \
			break;                                          \
		preempt_enable();                                       \
									\
		/* Signal the holder, then spin preemptibly. */         \
		if (!(lock)->break_lock)                                \
			(lock)->break_lock = 1;                         \
		while (!op##_can_lock(lock) && (lock)->break_lock)      \
			cpu_relax();                                    \
	}                                                               \
	(lock)->break_lock = 0;                                         \
}                                                                       \
									\
EXPORT_SYMBOL(_##op##_lock);                                            \
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)       \
{                                                                       \
	unsigned long flags;                                            \
									\
	for (;;) {                                                      \
		preempt_disable();                                      \
		local_irq_save(flags);                                  \
		if (likely(_raw_##op##_trylock(lock)))                  \
			break;                                          \
		/* Re-enable irqs and preemption while we wait. */      \
		local_irq_restore(flags);                               \
		preempt_enable();                                       \
									\
		if (!(lock)->break_lock)                                \
			(lock)->break_lock = 1;                         \
		while (!op##_can_lock(lock) && (lock)->break_lock)      \
			cpu_relax();                                    \
	}                                                               \
	(lock)->break_lock = 0;                                         \
	return flags;                                                   \
}                                                                       \
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);                                    \
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)                    \
{                                                                       \
	/* Same as irqsave, just discarding the saved flags. */         \
	_##op##_lock_irqsave(lock);                                     \
}                                                                       \
									\
EXPORT_SYMBOL(_##op##_lock_irq);                                        \
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)                     \
{                                                                       \
	unsigned long flags;                                            \
									\
	/*                                                      */      \
	/* Careful: we must exclude softirqs too, hence the     */      \
	/* irq-disabling. We use the generic preemption-aware   */      \
	/* function:                                            */      \
	/**/                                                            \
	flags = _##op##_lock_irqsave(lock);                             \
	local_bh_disable();                                             \
	local_irq_restore(flags);                                       \
}                                                                       \
									\
EXPORT_SYMBOL(_##op##_lock_bh)
276
277 /*
278  * Build preemption-friendly versions of the following
279  * lock-spinning functions:
280  *
281  *         _[spin|read|write]_lock()
282  *         _[spin|read|write]_lock_irq()
283  *         _[spin|read|write]_lock_irqsave()
284  *         _[spin|read|write]_lock_bh()
285  */
/* Instantiate the preemption-friendly lock ops for each lock flavour. */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
289
290 #endif /* CONFIG_PREEMPT */
291
292 #ifdef CONFIG_DEBUG_LOCK_ALLOC
293
/*
 * Like _spin_lock(), but annotates the acquire with a lockdep
 * @subclass, for taking two locks of the same class in a known order.
 */
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock_nested);
302
303 #endif
304
/*
 * Release @lock: mirror of _spin_lock() - drop the lockdep mapping,
 * release the raw lock, then re-enable preemption.
 */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
312
/*
 * Release a write-held @lock; mirror of _write_lock().
 */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);
320
/*
 * Release a read-held @lock; mirror of _read_lock().
 */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);
328
/*
 * Release @lock and restore the irq state saved by
 * _spin_lock_irqsave().  irqs are restored before preemption is
 * re-enabled, mirroring the acquire order.
 */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
337
/*
 * Release @lock and unconditionally re-enable local interrupts;
 * mirror of _spin_lock_irq().
 */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
346
/*
 * Release @lock and re-enable softirqs; mirror of _spin_lock_bh().
 */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	/* No resched check here - local_bh_enable_ip() does the final
	 * enable, attributed to our caller's return address. */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);
355
/*
 * Release a read-held @lock and restore the irq state saved by
 * _read_lock_irqsave().
 */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
364
/*
 * Release a read-held @lock and unconditionally re-enable local
 * interrupts; mirror of _read_lock_irq().
 */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);
373
/*
 * Release a read-held @lock and re-enable softirqs; mirror of
 * _read_lock_bh().
 */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	/* Final enable happens in local_bh_enable_ip(), credited to
	 * our caller's return address. */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_read_unlock_bh);
382
/*
 * Release a write-held @lock and restore the irq state saved by
 * _write_lock_irqsave().
 */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
391
/*
 * Release a write-held @lock and unconditionally re-enable local
 * interrupts; mirror of _write_lock_irq().
 */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);
400
/*
 * Release a write-held @lock and re-enable softirqs; mirror of
 * _write_lock_bh().
 */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	/* Final enable happens in local_bh_enable_ip(), credited to
	 * our caller's return address. */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_write_unlock_bh);
409
/*
 * Try to take @lock with softirqs disabled.  Returns 1 on success
 * (lock held, bh and preemption disabled); on failure re-enables both
 * and returns 0.
 */
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		/* Record the acquire with lockdep (trylock flag == 1). */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	/* Failed: unwind in reverse order of the disables above. */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
424
/*
 * Return non-zero if @addr lies within the __lockfunc text section,
 * i.e. inside one of the lock-spinning functions in this file.
 */
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];
	unsigned long start = (unsigned long)__lock_text_start;
	unsigned long end = (unsigned long)__lock_text_end;

	return addr >= start && addr < end;
}
EXPORT_SYMBOL(in_lock_functions);