/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>

/*
 * This internal data structure is used for optimizing access to some of
 * the subfields within the atomic_t cnts.
 */
struct __qrwlock {
        union {
                atomic_t cnts;
                struct {
#ifdef __LITTLE_ENDIAN
                        u8 wmode;       /* Writer mode   */
                        u8 rcnts[3];    /* Reader counts */
#else
                        u8 rcnts[3];    /* Reader counts */
                        u8 wmode;       /* Writer mode   */
#endif
                };
        };
        arch_spinlock_t lock;
};
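
/*
 * For orientation (constants taken from include/asm-generic/qrwlock.h of
 * the same era; verify against your tree): on little-endian, wmode is the
 * low-order byte of cnts and holds 0, _QW_WAITING (0x01) or _QW_LOCKED
 * (0xff), while rcnts holds the active reader count in the upper three
 * bytes, so each reader adds _QR_BIAS (1U << 8) to cnts. For example,
 * two readers plus a waiting writer yield cnts == 0x201.
 */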

/**
 * rspin_until_writer_unlock - inc reader count & spin until writer is gone
 * @lock : Pointer to queue rwlock structure
 * @cnts : Current queue rwlock lock value
 *
 * In interrupt context or at the head of the queue, the reader will just
 * increment the reader count & wait until the writer releases the lock.
 */
static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
        while ((cnts & _QW_WMASK) == _QW_LOCKED) {
                cpu_relax();
                cnts = atomic_read_acquire(&lock->cnts);
        }
}
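
/*
 * Note (inferred, not from the original comments): the loop above spins
 * only while wmode == _QW_LOCKED, not on any non-zero wmode. A reader
 * that reaches here may therefore proceed even though a writer has set
 * _QW_WAITING, which is what lets interrupt-context readers overtake a
 * pending (but not yet lock-holding) writer in the slowpath below.
 */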

/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * @cnts: Current qrwlock lock value
 */
void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
{
        /*
         * Readers come here when they cannot get the lock without waiting
         */
        if (unlikely(in_interrupt())) {
                /*
                 * Readers in interrupt context will get the lock immediately
                 * if the writer is just waiting (not holding the lock yet).
                 * The rspin_until_writer_unlock() function returns immediately
                 * in this case. Otherwise, they will spin (with ACQUIRE
                 * semantics) until the lock is available without waiting in
                 * the queue.
                 */
                rspin_until_writer_unlock(lock, cnts);
                return;
        }
        atomic_sub(_QR_BIAS, &lock->cnts);

        /*
         * Put the reader into the wait queue
         */
        arch_spin_lock(&lock->wait_lock);

        /*
         * The ACQUIRE semantics of the following spinning code ensure
         * that accesses can't leak upwards out of our subsequent critical
         * section in the case that the lock is currently held for write.
         */
        cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
        rspin_until_writer_unlock(lock, cnts);

        /*
         * Signal the next one in queue to become queue head
         */
        arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
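
/*
 * For reference, not part of this file: the corresponding reader fastpath
 * lives in include/asm-generic/qrwlock.h and, in kernels of this vintage,
 * looks roughly like the sketch below. Readers speculatively add _QR_BIAS
 * and only fall into the slowpath above when a writer byte is set:
 *
 *      static inline void queued_read_lock(struct qrwlock *lock)
 *      {
 *              u32 cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *
 *              if (likely(!(cnts & _QW_WMASK)))
 *                      return;
 *
 *              queued_read_lock_slowpath(lock, cnts);
 *      }
 *
 * This also explains the atomic_sub(_QR_BIAS, ...) at the top of the
 * slowpath: a non-interrupt reader must undo its speculative increment
 * before queuing on wait_lock.
 */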

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
        u32 cnts;

        /* Put the writer into the wait queue */
        arch_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock directly if no reader is present */
        if (!atomic_read(&lock->cnts) &&
            (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
                goto unlock;

        /*
         * Set the waiting flag to notify readers that a writer is pending,
         * or wait for a previous writer to go away.
         */
        for (;;) {
                struct __qrwlock *l = (struct __qrwlock *)lock;

                if (!READ_ONCE(l->wmode) &&
                    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
                        break;

                cpu_relax();
        }
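
        /*
         * Ordering note (inferred, not from the original authors): the
         * cmpxchg on wmode above can be _relaxed because it only
         * publishes the _QW_WAITING flag; the ACQUIRE ordering needed
         * for the write critical section is supplied by the
         * atomic_cmpxchg_acquire() that actually takes the lock below.
         */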

        /* When no more readers, set the locked flag */
        for (;;) {
                cnts = atomic_read(&lock->cnts);
                if ((cnts == _QW_WAITING) &&
                    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
                                            _QW_LOCKED) == _QW_WAITING))
                        break;

                cpu_relax();
        }
unlock:
        arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
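
/*
 * Likewise for writers, a sketch of the matching fastpath (assumed from
 * include/asm-generic/qrwlock.h; verify against your tree): it attempts a
 * single cmpxchg from the fully-unlocked state (cnts == 0) and otherwise
 * defers to the slowpath above.
 *
 *      static inline void queued_write_lock(struct qrwlock *lock)
 *      {
 *              if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 *                      return;
 *
 *              queued_write_lock_slowpath(lock);
 *      }
 */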