/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>

/*
 * This internal data structure is used for optimizing access to some of
 * the subfields within the atomic_t cnts.
 */
struct __qrwlock {
	union {
		atomic_t cnts;
		struct {
#ifdef __LITTLE_ENDIAN
			u8 wmode;	/* Writer mode   */
			u8 rcnts[3];	/* Reader counts */
#else
			u8 rcnts[3];	/* Reader counts */
			u8 wmode;	/* Writer mode   */
#endif
		};
	};
	arch_spinlock_t	lock;
};
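
/*
 * For reference, the cnts word overlaid above is laid out as follows.
 * The constants come from include/asm-generic/qrwlock.h; values are as
 * of this version of the code:
 *
 *	_QW_WAITING	0x01	(wmode: a writer is waiting)
 *	_QW_LOCKED	0xff	(wmode: a writer holds the lock)
 *	_QW_WMASK	0xff	(mask for the writer-mode byte)
 *	_QR_BIAS	0x100	(one reader; readers count in rcnts[])
 */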

/**
 * rspin_until_writer_unlock - inc reader count & spin until writer is gone
 * @lock : Pointer to queue rwlock structure
 * @cnts : Current queue rwlock lock value
 *
 * In interrupt context or at the head of the queue, the reader will just
 * increment the reader count & wait until the writer releases the lock.
 */
static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
		cpu_relax();
		cnts = atomic_read_acquire(&lock->cnts);
	}
}

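/*
 * For context, the read-lock fast path lives in asm-generic/qrwlock.h
 * and looks roughly like this (a sketch, not a verbatim copy):
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		u32 cnts;
 *
 *		cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *		queued_read_lock_slowpath(lock, cnts);
 *	}
 *
 * The fast path has therefore already added _QR_BIAS by the time the
 * slowpath below runs.
 */
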
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * @cnts: Current qrwlock lock value
 */
void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet).
		 * The rspin_until_writer_unlock() function returns immediately
		 * in this case. Otherwise, they will spin (with ACQUIRE
		 * semantics) until the lock is available without waiting in
		 * the queue.
		 */
		rspin_until_writer_unlock(lock, cnts);
		return;
	}
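	/*
	 * Back out the reader-count increment made by the fast path
	 * before joining the wait queue.
	 */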
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
	rspin_until_writer_unlock(lock, cnts);

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);

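/*
 * For context, the write-lock fast path in asm-generic/qrwlock.h looks
 * roughly like this (a sketch, not a verbatim copy):
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 *			return;
 *		queued_write_lock_slowpath(lock);
 *	}
 */
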
/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	u32 cnts;

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader or writer is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/*
	 * Set the waiting flag to notify readers that a writer is pending,
	 * or wait for a previous writer to go away.
	 */
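	/*
	 * The byte-level cmpxchg on wmode (via struct __qrwlock) can
	 * succeed even while readers hold the lock: it claims only the
	 * low byte and leaves the reader counts in rcnts[] untouched.
	 */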
	for (;;) {
		struct __qrwlock *l = (struct __qrwlock *)lock;

		if (!READ_ONCE(l->wmode) &&
		    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
			break;

		cpu_relax();
	}

	/* When no more readers remain, set the locked flag */
	for (;;) {
		cnts = atomic_read(&lock->cnts);
		if ((cnts == _QW_WAITING) &&
		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
					    _QW_LOCKED) == _QW_WAITING))
			break;

		cpu_relax();
	}
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
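
/*
 * For completeness, the unlock paths also live in asm-generic/qrwlock.h;
 * roughly (a sketch, not a verbatim copy):
 *
 *	queued_read_unlock:  (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 *	queued_write_unlock: smp_store_release((u8 *)&lock->cnts, 0);
 */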