#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single MCS node per CPU is safe because sleeping locks should not
 * be called from interrupt context and we have preemption disabled while
 * spinning.
 */
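/*
 * A rough usage sketch: a sleeping lock's optimistic-spin path takes the OSQ
 * first, so that only one waiter at a time spins on the lock owner. Here the
 * 'osq' member follows the mutex/rwsem naming convention and owner_running()
 * stands in for the caller's own spin condition:
 *
 *	if (osq_lock(&sem->osq)) {
 *		while (owner_running(sem))
 *			cpu_relax();
 *		osq_unlock(&sem->osq);
 *	}
 */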
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

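/* Undo encode_cpu(): recover the real CPU number stored in @node->cpu. */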
static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in the queue, then the 'old' value will be
	 * the prev node's CPU #; else it's set to OSQ_UNLOCKED_VAL, since if
	 * we're currently last in the queue, the queue will become empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued; we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax();
	}

	return next;
}

bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with the corresponding RELEASE in
	 * the uncontended unlock() fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail.
	 */
	old = atomic_xchg(&lock->tail, curr);
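	/*
	 * The tail was OSQ_UNLOCKED_VAL: the queue was empty and we now
	 * own the lock without having to queue up behind anyone.
	 */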
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;
	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store, because at
	 * that moment unlock() can proceed and wipe the node element from
	 * the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!READ_ONCE(node->locked)) {
		/*
		 * If we need to reschedule, bail... so we can block.
		 * Use vcpu_is_preempted() to avoid waiting for a preempted
		 * lock holder:
		 */
		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
			goto unqueue;

		cpu_relax();
	}
	return true;

unqueue:
	/*
	 * Step - A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer; @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

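	/* We unqueued ourselves without ever having taken the lock. */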
	return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

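	/*
	 * The fastpath failed, so a successor exists, but it has not yet
	 * linked itself into @node->next (or it is busy unqueueing). Wait
	 * for a stable next pointer, or for the queue to drain back to us,
	 * in which case there is nobody to wake.
	 */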
	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}