rcu: rcu_barrier VS cpu_hotplug: Ensure callbacks in dead cpu are migrated to online cpu
linux-2.6.git: kernel/rcupdate.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>

enum rcu_barrier {
        RCU_BARRIER_STD,
        RCU_BARRIER_BH,
        RCU_BARRIER_SCHED,
};

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
int rcu_scheduler_active __read_mostly;

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        if (rcu_blocking_is_gp())
                return;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
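
/*
 * Usage sketch (illustrative only, not part of this file; "foo",
 * "gbl_foo" and foo_update() are hypothetical): an updater publishes
 * a new version of an RCU-protected structure with
 * rcu_assign_pointer(), uses synchronize_rcu() to wait out readers
 * that might still see the old version, and only then frees it.  A
 * real updater would also serialize concurrent updates, e.g. with a
 * spinlock.
 */
#if 0
struct foo {
        int a;
};
static struct foo *gbl_foo;

void foo_update(struct foo *new_fp)
{
        struct foo *old_fp = gbl_foo;

        rcu_assign_pointer(gbl_foo, new_fp);    /* publish new version */
        synchronize_rcu();      /* wait for pre-existing readers to finish */
        kfree(old_fp);          /* no reader can still hold old_fp */
}

int foo_get_a(void)
{
        int ret;

        rcu_read_lock();
        ret = rcu_dereference(gbl_foo)->a;      /* read-side critical section */
        rcu_read_unlock();
        return ret;
}
#endif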

static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
        int cpu = smp_processor_id();
        struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

        atomic_inc(&rcu_barrier_cpu_count);
        switch ((enum rcu_barrier)type) {
        case RCU_BARRIER_STD:
                call_rcu(head, rcu_barrier_callback);
                break;
        case RCU_BARRIER_BH:
                call_rcu_bh(head, rcu_barrier_callback);
                break;
        case RCU_BARRIER_SCHED:
                call_rcu_sched(head, rcu_barrier_callback);
                break;
        }
}

static inline void wait_migrated_callbacks(void);

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(enum rcu_barrier type)
{
        BUG_ON(in_interrupt());
        /* Take mutex to serialize concurrent rcu_barrier() requests. */
        mutex_lock(&rcu_barrier_mutex);
        init_completion(&rcu_barrier_completion);
        /*
         * Initialize rcu_barrier_cpu_count to 1, then invoke
         * rcu_barrier_func() on each CPU, so that each CPU also has
         * incremented rcu_barrier_cpu_count.  Only then is it safe to
         * decrement rcu_barrier_cpu_count -- otherwise the first CPU
         * might complete its grace period before all of the other CPUs
         * did their increment, causing this function to return too
         * early.
         */
        atomic_set(&rcu_barrier_cpu_count, 1);
        on_each_cpu(rcu_barrier_func, (void *)type, 1);
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
        wait_migrated_callbacks();
}
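
/*
 * The count-starts-at-one trick above generalizes to any fan-out/
 * fan-in wait.  A minimal sketch (hypothetical names, not part of
 * this file): the dispatcher holds an extra initial reference so the
 * completion cannot fire until dispatching has finished, even if
 * every worker completes first.
 */
#if 0
static atomic_t nr_pending;
static struct completion all_done;

static void one_worker_done(void)       /* called as each worker finishes */
{
        if (atomic_dec_and_test(&nr_pending))
                complete(&all_done);
}

static void dispatch_and_wait(int nr_workers)
{
        int i;

        init_completion(&all_done);
        atomic_set(&nr_pending, 1);             /* dispatcher's own reference */
        for (i = 0; i < nr_workers; i++) {
                atomic_inc(&nr_pending);
                start_worker(i);        /* hypothetical; ends in one_worker_done() */
        }
        if (atomic_dec_and_test(&nr_pending))   /* drop dispatcher's reference */
                complete(&all_done);
        wait_for_completion(&all_done);
}
#endif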

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(RCU_BARRIER_STD);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
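
/*
 * Usage sketch (hypothetical module, not part of this file): any
 * module that passes its own functions to call_rcu() must call
 * rcu_barrier() in its exit path; otherwise a still-pending callback
 * could run after the module text has been unloaded.
 */
#if 0
static void __exit mymod_exit(void)
{
        unregister_mymod_hooks();       /* hypothetical: stop posting callbacks */
        rcu_barrier();                  /* flush callbacks already posted */
        /* only now is it safe for this module's code to be freed */
}
module_exit(mymod_exit);
#endif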

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
        _rcu_barrier(RCU_BARRIER_BH);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
        _rcu_barrier(RCU_BARRIER_SCHED);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
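
/*
 * Flavor-pairing sketch (hypothetical names): each barrier waits only
 * for callbacks posted via the matching call_rcu*() variant, so a
 * user of call_rcu_bh() must flush with rcu_barrier_bh(); plain
 * rcu_barrier() would not wait for it.
 */
#if 0
static struct rcu_head my_head;

static void my_reclaim(struct rcu_head *head)
{
        /* runs after an rcu_bh grace period */
}

static void my_teardown(void)
{
        call_rcu_bh(&my_head, my_reclaim);
        rcu_barrier_bh();       /* waits for my_reclaim() to finish */
}
#endif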

static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
static struct rcu_head rcu_migrate_head[3];
static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);

static void rcu_migrate_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_migrate_type_count))
                wake_up(&rcu_migrate_wq);
}

static inline void wait_migrated_callbacks(void)
{
        wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
}

static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
                unsigned long action, void *hcpu)
{
        if (action == CPU_DYING) {
                /*
                 * preempt_disable() in on_each_cpu() prevents stop_machine(),
                 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
                 * returns, all online cpus have run rcu_barrier_func() and
                 * queued their barrier callbacks, while the dying cpu (if
                 * there is one) has queued rcu_migrate_callback()s instead.
                 *
                 * These callbacks ensure _rcu_barrier() waits for all
                 * RCU callbacks of the specified type to complete.
                 */
                atomic_set(&rcu_migrate_type_count, 3);
                call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
                call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
                call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
        } else if (action == CPU_POST_DEAD) {
                /* rcu_migrate_head is protected by cpu_add_remove_lock */
                wait_migrated_callbacks();
        }

        return NOTIFY_OK;
}
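
/*
 * Notifier-context sketch (hypothetical subsystem): CPU_DYING runs on
 * the dying cpu inside stop_machine() with interrupts disabled, so it
 * may only post work, whereas CPU_POST_DEAD runs later in sleepable
 * context.  That is why the blocking wait_migrated_callbacks() above
 * happens at CPU_POST_DEAD and not at CPU_DYING.
 */
#if 0
static int __cpuinit my_hotplug(struct notifier_block *self,
                unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_DYING:
                /* atomic context: queue work, never sleep */
                break;
        case CPU_POST_DEAD:
                /* process context: may block on the queued work */
                break;
        }
        return NOTIFY_OK;
}

static int __init my_subsys_init(void)
{
        hotcpu_notifier(my_hotplug, 0);
        return 0;
}
#endif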

void __init rcu_init(void)
{
        __rcu_init();
        hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
}

void rcu_scheduler_starting(void)
{
        WARN_ON(num_online_cpus() != 1);
        WARN_ON(nr_context_switches() > 0);
        rcu_scheduler_active = 1;
}