/*
 *  ARM semaphore implementation, taken from
 *
 *  i386 semaphore implementation.
 *
 *  (C) Copyright 1999 Linus Torvalds
 *  (C) Copyright 2003 Ian Molton (ARM26 mods)
 *
 *  Modified for ARM by Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
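/*
 * For reference, these routines operate on the structure declared in
 * <asm/semaphore.h>; a rough sketch of it (field names as used below;
 * the exact declaration lives in that header) is:
 *
 *	struct semaphore {
 *		atomic_t		count;		// > 0: free, <= 0: contended
 *		int			sleepers;	// contending tasks
 *		wait_queue_head_t	wait;		// where contenders sleep
 *	};
 *
 * The inline fast paths in <asm/semaphore.h> only touch "count"; they
 * branch into the slow-path helpers in this file when the atomic
 * update shows contention.
 */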

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative, we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
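/*
 * A short worked example of the scheme above (assuming a semaphore
 * initialised to 1, i.e. used as a mutex):
 *
 *	A: down()	count 1 -> 0	fast path, A owns the semaphore
 *	B: down()	count 0 -> -1	negative, so B enters __down():
 *					sleepers becomes 1, and
 *					atomic_add_negative(0, count)
 *					leaves count at -1, so B sleeps
 *	A: up()		count -1 -> 0	count was negative, so __up()
 *					runs and wakes B
 *	B: wakes up			atomic_add_negative(0, count)
 *					now sees 0, which is not negative,
 *					so sleepers is reset to 0 and B
 *					owns the semaphore
 */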

void __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

static DEFINE_SPINLOCK(semaphore_lock);

void __sched __down(struct semaphore * sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_UNINTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
        wake_up(&sem->wait);
}
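/*
 * Callers never invoke __down() directly; they use the inline down()
 * from <asm/semaphore.h>, which falls back to __down_failed (defined
 * below) on contention. An illustrative caller looks roughly like:
 *
 *	static DECLARE_MUTEX(my_mutex);		// semaphore with count 1
 *
 *	void my_function(void)
 *	{
 *		down(&my_mutex);		// may sleep, uninterruptibly
 *		// ... critical section ...
 *		up(&my_mutex);
 *	}
 */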

int __sched __down_interruptible(struct semaphore * sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_INTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock. The
                 * "-1" is because we're still hoping to get
                 * the lock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_INTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
        wake_up(&sem->wait);
        return retval;
}
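/*
 * The interruptible variant returns 0 on success and -EINTR if the
 * sleep was broken by a signal, so callers (again via the inline
 * down_interruptible() wrapper, not this function) must check the
 * return value. An illustrative pattern:
 *
 *	if (down_interruptible(&my_mutex))
 *		return -ERESTARTSYS;	// signal arrived, retry the syscall
 *	// ... critical section ...
 *	up(&my_mutex);
 */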

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore * sem)
{
        int sleepers;
        unsigned long flags;

        spin_lock_irqsave(&semaphore_lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock.
         */
        if (!atomic_add_negative(sleepers, &sem->count))
                wake_up(&sem->wait);

        spin_unlock_irqrestore(&semaphore_lock, flags);
        return 1;
}
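/*
 * Note the return convention: this slow path only runs after the
 * inline fast path has already failed to take the semaphore, so it
 * always returns 1 ("not acquired"); the inline down_trylock() wrapper
 * returns 0 when the semaphore was taken. An illustrative caller:
 *
 *	if (down_trylock(&my_mutex)) {
 *		// contended - do not sleep, take the fallback path
 *		return -EBUSY;
 *	}
 *	// ... critical section ...
 *	up(&my_mutex);
 */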

/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * ip contains the semaphore pointer on entry. Save the C-clobbered
 * registers (r0 to r3 and lr), but not ip, as we use it as a return
 * value in some cases.
 */
asm("   .section .sched.text , #alloc, #execinstr       \n\
        .align  5                               \n\
        .globl  __down_failed                   \n\
__down_failed:                                  \n\
        stmfd   sp!, {r0 - r3, lr}              \n\
        mov     r0, ip                          \n\
        bl      __down                          \n\
        ldmfd   sp!, {r0 - r3, pc}^             \n\
                                                \n\
        .align  5                               \n\
        .globl  __down_interruptible_failed     \n\
__down_interruptible_failed:                    \n\
        stmfd   sp!, {r0 - r3, lr}              \n\
        mov     r0, ip                          \n\
        bl      __down_interruptible            \n\
        mov     ip, r0                          \n\
        ldmfd   sp!, {r0 - r3, pc}^             \n\
                                                \n\
        .align  5                               \n\
        .globl  __down_trylock_failed           \n\
__down_trylock_failed:                          \n\
        stmfd   sp!, {r0 - r3, lr}              \n\
        mov     r0, ip                          \n\
        bl      __down_trylock                  \n\
        mov     ip, r0                          \n\
        ldmfd   sp!, {r0 - r3, pc}^             \n\
                                                \n\
        .align  5                               \n\
        .globl  __up_wakeup                     \n\
__up_wakeup:                                    \n\
        stmfd   sp!, {r0 - r3, lr}              \n\
        mov     r0, ip                          \n\
        bl      __up                            \n\
        ldmfd   sp!, {r0 - r3, pc}^             \n\
        ");

EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_interruptible_failed);
EXPORT_SYMBOL(__down_trylock_failed);
EXPORT_SYMBOL(__up_wakeup);