/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

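/*
 * The lock->count convention used throughout this file: 1 means
 * unlocked, 0 means locked with no waiters, and negative (-1 below)
 * means locked with possible waiters queued on wait_list. The
 * fastpaths ride the 1<->0 transitions; everything else falls
 * through to the slowpath functions below.
 */
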
/***
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
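
/*
 * Illustrative usage sketch, not part of this file ('my_dev' and its
 * fields are made-up names): a mutex is either defined statically with
 * DEFINE_MUTEX() or set up at runtime via the mutex_init() wrapper,
 * which feeds __mutex_init() above:
 *
 *	static DEFINE_MUTEX(my_dev_registry_lock);
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */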

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/***
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
inline void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/***
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
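
/*
 * Illustrative usage sketch for the pair above ('my_dev' is a made-up
 * example): the critical section may sleep, and the task that locked
 * the mutex must be the one to unlock it:
 *
 *	static int my_dev_write(struct my_dev *dev, const void *buf, size_t len)
 *	{
 *		mutex_lock(&dev->lock);
 *		memcpy(dev->buffer, buf, len);
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */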

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	debug_mutex_set_owner(lock, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
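
/*
 * Illustrative sketch for the _nested variants ('parent' and 'child'
 * are made-up objects whose locks share one lock class): lockdep would
 * normally flag taking two locks of the same class as potential
 * recursion, so the inner acquisition is annotated with a distinct
 * subclass:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */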

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here:
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/***
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired, or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
}
EXPORT_SYMBOL(mutex_lock_killable);
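
/*
 * Illustrative sketch for the two return-value variants above ('my_dev'
 * is made up): both return 0 on success and -EINTR when the sleep is
 * interrupted - by any signal for _interruptible(), only by a fatal
 * signal for _killable() - so the result must be checked:
 *
 *	static int my_dev_op(struct my_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;
 *		...
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 */
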
312
Török Edwin7918baa2008-11-24 10:17:42 +0200313static __used noinline void __sched
Peter Zijlstrae4564f72007-10-11 22:11:12 +0200314__mutex_lock_slowpath(atomic_t *lock_count)
315{
316 struct mutex *lock = container_of(lock_count, struct mutex, count);
317
318 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
319}
320
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -0800321static noinline int __sched
Liam R. Howlettad776532007-12-06 17:37:59 -0500322__mutex_lock_killable_slowpath(atomic_t *lock_count)
323{
324 struct mutex *lock = container_of(lock_count, struct mutex, count);
325
326 return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
327}
328
Harvey Harrison7ad5b3a2008-02-08 04:19:53 -0800329static noinline int __sched
Ingo Molnar9a11b49a2006-07-03 00:24:33 -0700330__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
Ingo Molnar6053ee32006-01-09 15:59:19 -0800331{
332 struct mutex *lock = container_of(lock_count, struct mutex, count);
333
Peter Zijlstrae4564f72007-10-11 22:11:12 +0200334 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
Ingo Molnar6053ee32006-01-09 15:59:19 -0800335}
Peter Zijlstrae4564f72007-10-11 22:11:12 +0200336#endif

/*
 * Spinlock-based trylock: we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/***
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return values are the inverse of the down_trylock() return
 * values! Be careful about this when converting semaphore users
 * to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
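
/*
 * Illustrative sketch of the trylock convention documented above
 * ('my_dev' is made up) - nonzero means the lock was taken:
 *
 *	if (mutex_trylock(&my_dev->lock)) {
 *		...
 *		mutex_unlock(&my_dev->lock);
 *	} else {
 *		...
 *	}
 */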