locking: Convert raw_rwlock functions to arch_rwlock
[linux-2.6.git] / arch / s390 / lib / spinlock.c
1 /*
2  *  arch/s390/lib/spinlock.c
3  *    Out of line spinlock code.
4  *
5  *    Copyright (C) IBM Corp. 2004, 2006
6  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
7  */
8
9 #include <linux/types.h>
10 #include <linux/module.h>
11 #include <linux/spinlock.h>
12 #include <linux/init.h>
13 #include <asm/io.h>
14
/* Number of busy-wait iterations before yielding the cpu; see spin_retry=. */
int spin_retry = 1000;

/**
 * spin_retry= parameter
 *
 * Parse the "spin_retry=" kernel command line option to override the
 * default spin loop iteration count used by the lock slow paths below.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
26
27 static inline void _raw_yield(void)
28 {
29         if (MACHINE_HAS_DIAG44)
30                 asm volatile("diag 0,0,0x44");
31 }
32
33 static inline void _raw_yield_cpu(int cpu)
34 {
35         if (MACHINE_HAS_DIAG9C)
36                 asm volatile("diag %0,0,0x9c"
37                              : : "d" (__cpu_logical_map[cpu]));
38         else
39                 _raw_yield();
40 }
41
42 void arch_spin_lock_wait(arch_spinlock_t *lp)
43 {
44         int count = spin_retry;
45         unsigned int cpu = ~smp_processor_id();
46
47         while (1) {
48                 if (count-- <= 0) {
49                         unsigned int owner = lp->owner_cpu;
50                         if (owner != 0)
51                                 _raw_yield_cpu(~owner);
52                         count = spin_retry;
53                 }
54                 if (arch_spin_is_locked(lp))
55                         continue;
56                 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
57                         return;
58         }
59 }
60 EXPORT_SYMBOL(arch_spin_lock_wait);
61
/*
 * Slow path of arch_spin_lock_irqsave: the caller has interrupts
 * disabled and the previously saved irq state in @flags.  Spin with
 * the caller's irq state restored and only disable interrupts around
 * the actual acquisition attempt, so pending interrupts are not
 * blocked for the whole wait.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();	/* lock value: ~cpu-id, never 0 */

	local_irq_restore(flags);	/* spin with caller's irq state */
	while (1) {
		if (count-- <= 0) {
			unsigned int owner = lp->owner_cpu;
			if (owner != 0)
				_raw_yield_cpu(~owner);	/* directed yield to holder */
			count = spin_retry;
		}
		if (arch_spin_is_locked(lp))
			continue;
		local_irq_disable();	/* lock must be taken with irqs off */
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;		/* acquired; return with irqs disabled */
		local_irq_restore(flags);	/* lost the race, spin again */
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
84
85 int arch_spin_trylock_retry(arch_spinlock_t *lp)
86 {
87         unsigned int cpu = ~smp_processor_id();
88         int count;
89
90         for (count = spin_retry; count > 0; count--) {
91                 if (arch_spin_is_locked(lp))
92                         continue;
93                 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
94                         return 1;
95         }
96         return 0;
97 }
98 EXPORT_SYMBOL(arch_spin_trylock_retry);
99
100 void arch_spin_relax(arch_spinlock_t *lock)
101 {
102         unsigned int cpu = lock->owner_cpu;
103         if (cpu != 0)
104                 _raw_yield_cpu(~cpu);
105 }
106 EXPORT_SYMBOL(arch_spin_relax);
107
108 void _raw_read_lock_wait(arch_rwlock_t *rw)
109 {
110         unsigned int old;
111         int count = spin_retry;
112
113         while (1) {
114                 if (count-- <= 0) {
115                         _raw_yield();
116                         count = spin_retry;
117                 }
118                 if (!arch_read_can_lock(rw))
119                         continue;
120                 old = rw->lock & 0x7fffffffU;
121                 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
122                         return;
123         }
124 }
125 EXPORT_SYMBOL(_raw_read_lock_wait);
126
127 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
128 {
129         unsigned int old;
130         int count = spin_retry;
131
132         local_irq_restore(flags);
133         while (1) {
134                 if (count-- <= 0) {
135                         _raw_yield();
136                         count = spin_retry;
137                 }
138                 if (!arch_read_can_lock(rw))
139                         continue;
140                 old = rw->lock & 0x7fffffffU;
141                 local_irq_disable();
142                 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
143                         return;
144         }
145 }
146 EXPORT_SYMBOL(_raw_read_lock_wait_flags);
147
148 int _raw_read_trylock_retry(arch_rwlock_t *rw)
149 {
150         unsigned int old;
151         int count = spin_retry;
152
153         while (count-- > 0) {
154                 if (!arch_read_can_lock(rw))
155                         continue;
156                 old = rw->lock & 0x7fffffffU;
157                 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
158                         return 1;
159         }
160         return 0;
161 }
162 EXPORT_SYMBOL(_raw_read_trylock_retry);
163
164 void _raw_write_lock_wait(arch_rwlock_t *rw)
165 {
166         int count = spin_retry;
167
168         while (1) {
169                 if (count-- <= 0) {
170                         _raw_yield();
171                         count = spin_retry;
172                 }
173                 if (!arch_write_can_lock(rw))
174                         continue;
175                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
176                         return;
177         }
178 }
179 EXPORT_SYMBOL(_raw_write_lock_wait);
180
181 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
182 {
183         int count = spin_retry;
184
185         local_irq_restore(flags);
186         while (1) {
187                 if (count-- <= 0) {
188                         _raw_yield();
189                         count = spin_retry;
190                 }
191                 if (!arch_write_can_lock(rw))
192                         continue;
193                 local_irq_disable();
194                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
195                         return;
196         }
197 }
198 EXPORT_SYMBOL(_raw_write_lock_wait_flags);
199
200 int _raw_write_trylock_retry(arch_rwlock_t *rw)
201 {
202         int count = spin_retry;
203
204         while (count-- > 0) {
205                 if (!arch_write_can_lock(rw))
206                         continue;
207                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
208                         return 1;
209         }
210         return 0;
211 }
212 EXPORT_SYMBOL(_raw_write_trylock_retry);