/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */

#define MAX_RCU_LVLS 4

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1	(RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	(RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3	(RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4	(RCU_FANOUT_3 * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
# define RCU_NUM_LVLS	1
# define NUM_RCU_LVL_0	1
# define NUM_RCU_LVL_1	(NR_CPUS)
# define NUM_RCU_LVL_2	0
# define NUM_RCU_LVL_3	0
# define NUM_RCU_LVL_4	0
#elif NR_CPUS <= RCU_FANOUT_2
# define RCU_NUM_LVLS	2
# define NUM_RCU_LVL_0	1
# define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_2	(NR_CPUS)
# define NUM_RCU_LVL_3	0
# define NUM_RCU_LVL_4	0
#elif NR_CPUS <= RCU_FANOUT_3
# define RCU_NUM_LVLS	3
# define NUM_RCU_LVL_0	1
# define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_2	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_3	(NR_CPUS)
# define NUM_RCU_LVL_4	0
#elif NR_CPUS <= RCU_FANOUT_4
# define RCU_NUM_LVLS	4
# define NUM_RCU_LVL_0	1
# define NUM_RCU_LVL_1	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
# define NUM_RCU_LVL_2	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_3	DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_4	(NR_CPUS)
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)

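/*
 * Illustrative worked example (not used by the build, values chosen only
 * for illustration): with NR_CPUS=4096, RCU_FANOUT=64, and
 * RCU_FANOUT_LEAF=16, the macros above evaluate to:
 *
 *	RCU_FANOUT_1 =   16, RCU_FANOUT_2 = 1024, RCU_FANOUT_3 = 65536
 *	RCU_NUM_LVLS  =    3	(4096 <= RCU_FANOUT_3)
 *	NUM_RCU_LVL_0 =    1	(the root)
 *	NUM_RCU_LVL_1 =    4	(DIV_ROUND_UP(4096, 1024))
 *	NUM_RCU_LVL_2 =  256	(DIV_ROUND_UP(4096, 16))
 *	NUM_RCU_LVL_3 = 4096	(per-CPU slots, not rcu_node structures)
 *	NUM_RCU_NODES =  261	(1 + 4 + 256)
 */
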
extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting; /* Track irq/process nesting level. */
				    /* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
	atomic_t dynticks;	    /* Even value for idle, else odd. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
				    /* irq/process nesting level from idle. */
	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
				    /* "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
				    /* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;		    /* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
				    /* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
				    /* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
				    /* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
				    /* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};

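/*
 * Illustrative sketch (simplified; the real accessors live in tree.c):
 * ->dynticks is incremented on every transition into and out of
 * dynticks-idle, so a snapshot with an even low-order bit means the CPU
 * was idle at snapshot time:
 *
 *	snap = atomic_add_return(0, &rdtp->dynticks);
 *	cpu_was_idle = !(snap & 0x1);
 */
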
/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed. */
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blkd_tasks */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for PREEMPT_RCU). */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	wait_queue_head_t nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp;

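/*
 * Illustrative sketch (heavily simplified; the real logic is
 * rcu_report_qs_rnp() in tree.c): once all CPUs and groups below a given
 * rcu_node have reported quiescent states, that node's ->grpmask bit is
 * cleared in its parent's ->qsmask, and the report propagates rootward:
 *
 *	for (; rnp != NULL; rnp = rnp->parent) {
 *		rnp->qsmask &= ~mask;		(with rnp->lock held)
 *		if (rnp->qsmask != 0)
 *			break;			(other children still pending)
 *		mask = rnp->grpmask;		(this node's bit in its parent)
 *	}
 */
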
/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

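/*
 * Illustrative usage sketch (assumes a fully initialized rcu_state "rsp";
 * not part of this file's interface): counting the CPUs covered by the
 * leaf rcu_node structures with the iterator above:
 *
 *	struct rcu_node *rnp;
 *	int ncpus = 0;
 *
 *	rcu_for_each_leaf_node(rsp, rnp)
 *		ncpus += rnp->grphi - rnp->grplo + 1;
 */
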
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
					/*  for rcu_all_qs() invocations. */
	bool		passed_quiesce;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long	n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU */
	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/*  from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_qs_pending;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier() and OOM callbacks. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	int cpu;
	struct rcu_state *rsp;
};

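/*
 * Illustrative sketch (simplified; see rcu_advance_cbs() in tree.c for the
 * real thing): once the grace period recorded in ->nxtcompleted[i] is no
 * newer than the current ->completed value, that sublist is merged into
 * the RCU_DONE_TAIL segment by sliding the done tail pointer forward:
 *
 *	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
 *		if (ULONG_CMP_LT(rdp->completed, rdp->nxtcompleted[i]))
 *			break;
 *		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
 *	}
 */
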
/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

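/*
 * Worked example (illustrative only): with HZ=1000,
 * RCU_JIFFIES_TILL_FORCE_QS evaluates to 1 + 1 + 1 = 3 jiffies, while with
 * HZ=100 it evaluates to 1 + 0 + 0 = 1 jiffy, so faster-ticking kernels
 * wait more ticks before forcing quiescent states.
 */
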
#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

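/*
 * Illustrative usage sketch (roughly how the priority-boost kthread in
 * tree_plugin.h waits for work; shown here only to document the macro
 * above):
 *
 *	rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 */
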
/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
	u8 flavor_mask;				/* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
		     void (*func)(struct rcu_head *head));

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	fqs_state ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8	boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	wait_queue_head_t gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long n_barrier_done;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	atomic_long_t expedited_start;		/* Starting ticket. */
	atomic_long_t expedited_done;		/* Done ticket. */
	atomic_long_t expedited_wrap;		/* # near-wrap incidents. */
	atomic_long_t expedited_tryfail;	/* # acquisition failures. */
	atomic_long_t expedited_workdone1;	/* # done by others #1. */
	atomic_long_t expedited_workdone2;	/* # done by others #2. */
	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
	atomic_long_t expedited_stoppedcpus;	/* # successful stop_cpus. */
	atomic_long_t expedited_done_tries;	/* # tries to update _done. */
	atomic_long_t expedited_done_lost;	/* # times beaten to _done. */
	atomic_long_t expedited_done_exit;	/* # times exited _done loop. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};

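/*
 * Illustrative layout sketch (for the worked example above: three levels,
 * NUM_RCU_LVL_1 == 4, NUM_RCU_LVL_2 == 256), showing how ->level[] indexes
 * into the dense ->node[] array:
 *
 *	->level[0] == &->node[0]	(the root)
 *	->level[1] == &->node[1]	(4 interior nodes: node[1] .. node[4])
 *	->level[2] == &->node[5]	(256 leaves:       node[5] .. node[260])
 */
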
/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_WAIT_INIT 0	/* Initial state. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_WAIT_FQS  2	/* Wait for force-quiescent-state time. */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)

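/*
 * Illustrative usage sketch (the in-tree users live in tree.c and
 * tree_trace.c): iterating over all registered flavors, for example to
 * print each flavor's name:
 *
 *	struct rcu_state *rsp;
 *
 *	for_each_rcu_flavor(rsp)
 *		pr_info("%s\n", rsp->name);
 */
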
/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */