[linux-3.10.git] / lib / kernel_lock.c
/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/smp_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>
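/*
 * Note: defining CREATE_TRACE_POINTS before including the trace header
 * emits the definitions of the trace_lock_kernel() and trace_unlock_kernel()
 * tracepoints that _lock_kernel() and _unlock_kernel() call below.
 */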

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel().  It is transparently dropped and reacquired
 * over schedule().  It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static  __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);

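/*
 * Example usage (illustrative sketch only, not compiled; do_legacy_ioctl()
 * is a made-up placeholder).  lock_kernel()/unlock_kernel() come from
 * <linux/smp_lock.h>; because the depth is tracked in current->lock_depth,
 * the pair nests within a single task:
 */
#if 0
static void bkl_usage_example(void)
{
	lock_kernel();		/* lock_depth -1 -> 0: really takes kernel_flag */
	lock_kernel();		/* lock_depth 0 -> 1: recursive, only bumps the count */
	do_legacy_ioctl();	/* made-up placeholder for a legacy critical section */
	unlock_kernel();	/* lock_depth 1 -> 0: still held */
	unlock_kernel();	/* lock_depth 0 -> -1: kernel_flag really released */
}
#endif
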
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - do_raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!do_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}
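
/*
 * For context, the scheduler reaches the two helpers above through wrappers
 * in <linux/smp_lock.h> that are conditional on the task actually holding
 * the BKL - roughly (simplified sketch):
 *
 *	static inline void release_kernel_lock(struct task_struct *task)
 *	{
 *		if (unlikely(task->lock_depth >= 0))
 *			__release_kernel_lock();
 *	}
 *
 *	static inline int reacquire_kernel_lock(struct task_struct *task)
 *	{
 *		if (unlikely(task->lock_depth >= 0))
 *			return __reacquire_kernel_lock();
 *		return 0;
 *	}
 *
 * schedule() drops the lock before switching away and, after the task is
 * switched back in, goes around its scheduling loop again if
 * reacquire_kernel_lock() fails with -EAGAIN because a reschedule is due.
 */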

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * do_raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			do_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (raw_spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!do_raw_spin_trylock(&kernel_flag));
	}
}
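
/*
 * The wait loop above is the "spin with preemption enabled" variant of
 * test-and-test-and-set: while the lock is contended we only read
 * raw_spin_is_locked() (no cache-line bouncing from repeated failed atomic
 * trylocks) and preemption stays on, so the waiting task can still be
 * scheduled away; the atomic trylock is retried only once the lock looks
 * free again.
 */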

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	do_raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

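/*
 * For comparison: a lockdep-covered unlock (a plain spin_unlock()) would
 * also run the spin_release()/lock_release() dep-chain hooks before the raw
 * unlock.  The BKL sidesteps those by calling the do_raw_* primitives
 * directly, so only the raw unlock and the preempt_enable() remain.
 */
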
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
	int depth = current->lock_depth + 1;

	trace_lock_kernel(func, file, line);

	if (likely(!depth)) {
		might_sleep();
		__lock_kernel();
	}
	current->lock_depth = depth;
}

void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();

	trace_unlock_kernel(func, file, line);
}

EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);
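
/*
 * Callers reach the two exported entry points above through macros in
 * <linux/smp_lock.h> that pass the call site down for the BKL tracepoints,
 * roughly (simplified sketch):
 *
 *	#define lock_kernel() do {				\
 *		_lock_kernel(__func__, __FILE__, __LINE__);	\
 *	} while (0)
 *
 *	#define unlock_kernel() do {				\
 *		_unlock_kernel(__func__, __FILE__, __LINE__);	\
 *	} while (0)
 */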