[SPARC32]: Fix SMP build regression
[linux-2.6.git] / arch / sparc / lib / atomic32.c
1 /*
2  * atomic32.c: 32-bit atomic_t implementation
3  *
4  * Copyright (C) 2004 Keith M Wesolowski
5  * 
6  * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
7  */
8
9 #include <asm/atomic.h>
10 #include <linux/spinlock.h>
11 #include <linux/module.h>
12
#ifdef CONFIG_SMP
/*
 * SMP: emulate atomic ops with a small hash of spinlocks.  The word's
 * address is hashed (bits 8.. of the pointer) onto one of
 * ATOMIC_HASH_SIZE locks, so unrelated words rarely contend for the
 * same lock while words sharing a cache line tend to share one.
 */
#define ATOMIC_HASH_SIZE        4
#define ATOMIC_HASH(a)  (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
        [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
};

#else /* SMP */

/*
 * UP: interrupts are the only source of concurrency, so one dummy
 * lock (whose irqsave variant just disables interrupts) is enough.
 */
static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE        1
#define ATOMIC_HASH(a)          (&dummy)

#endif /* SMP */
28
29 int __atomic_add_return(int i, atomic_t *v)
30 {
31         int ret;
32         unsigned long flags;
33         spin_lock_irqsave(ATOMIC_HASH(v), flags);
34
35         ret = (v->counter += i);
36
37         spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
38         return ret;
39 }
40 EXPORT_SYMBOL(__atomic_add_return);
41
42 int atomic_cmpxchg(atomic_t *v, int old, int new)
43 {
44         int ret;
45         unsigned long flags;
46
47         spin_lock_irqsave(ATOMIC_HASH(v), flags);
48         ret = v->counter;
49         if (likely(ret == old))
50                 v->counter = new;
51
52         spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
53         return ret;
54 }
55 EXPORT_SYMBOL(atomic_cmpxchg);
56
57 int atomic_add_unless(atomic_t *v, int a, int u)
58 {
59         int ret;
60         unsigned long flags;
61
62         spin_lock_irqsave(ATOMIC_HASH(v), flags);
63         ret = v->counter;
64         if (ret != u)
65                 v->counter += a;
66         spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
67         return ret != u;
68 }
69 EXPORT_SYMBOL(atomic_add_unless);
70
/*
 * Even a plain store must take the hash lock: otherwise it could race
 * with a concurrent locked read-modify-write on the same word.
 */
72 void atomic_set(atomic_t *v, int i)
73 {
74         unsigned long flags;
75
76         spin_lock_irqsave(ATOMIC_HASH(v), flags);
77         v->counter = i;
78         spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
79 }
80 EXPORT_SYMBOL(atomic_set);
81
/*
 * Atomically OR @mask into *@addr.  Returns the masked old value,
 * i.e. non-zero iff any of the bits in @mask were already set.
 */
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long flags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___set_bit);
94
/*
 * Atomically clear the bits of @mask in *@addr.  Returns the masked
 * old value, i.e. non-zero iff any of those bits were set before.
 */
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long flags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___clear_bit);
107
/*
 * Atomically toggle the bits of @mask in *@addr.  Returns the masked
 * old value, i.e. non-zero iff any of those bits were set before.
 */
unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long flags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), flags);
	prev = *addr;
	*addr = prev ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);

	return prev & mask;
}
EXPORT_SYMBOL(___change_bit);