/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
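
/*
 * Example (illustrative sketch added for exposition, not part of the kernel
 * sources): a subsystem that initializes per-CPU state and then registers a
 * hotplug callback would typically follow the protocol described above.  The
 * "foo" identifiers are hypothetical placeholders:
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call	= foo_cpu_callback,
 *	};
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		foo_init_cpu(cpu);
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *	cpu_notifier_register_done();
 *
 * Doing the registration with __register_cpu_notifier() inside the begin/done
 * section keeps the per-CPU setup and the callback registration consistent
 * with respect to concurrent CPU hotplug.
 */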

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);
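
/*
 * Example (illustrative sketch added for exposition, not part of the kernel
 * sources): code that must see a stable set of online CPUs brackets its work
 * with the reader pair above; foo_update_cpu() is a hypothetical placeholder:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		foo_update_cpu(cpu);
 *	put_online_cpus();
 *
 * Readers may nest and may sleep; they only exclude a hotplug writer
 * (cpu_hotplug_begin()), not each other.
 */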

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero, and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
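
/*
 * Example (illustrative sketch added for exposition, not part of the kernel
 * sources): callers that must temporarily fence off CPU hotplug pair the two
 * calls above, since cpu_hotplug_disabled is a count rather than a flag.  The
 * helper name is a hypothetical placeholder:
 *
 *	cpu_hotplug_disable();
 *	foo_do_work_that_must_not_race_with_hotplug();
 *	cpu_hotplug_enable();
 *
 * An unbalanced cpu_hotplug_enable() trips the WARN_ONCE() in
 * __cpu_hotplug_enable().
 */
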
#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reads so that we do not warn about a task
		 * that was running on this cpu in the past and has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park((long)param->hcpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take
	 * care of the RCU boost case.
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200389 */
Paul E. McKenney779de6c2015-06-10 13:34:41 -0700390 if (IS_ENABLED(CONFIG_PREEMPT))
391 synchronize_rcu_mult(call_rcu, call_rcu_sched);
392 else
393 synchronize_rcu();
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200394
Michael wang106dd5a2013-11-13 11:10:56 +0800395 smpboot_park_threads(cpu);
396
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200397 /*
Thomas Gleixnera8994182015-07-05 17:12:30 +0000398 * Prevent irq alloc/free while the dying cpu reorganizes the
399 * interrupt affinities.
400 */
401 irq_lock_sparse();
402
403 /*
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200404 * So now all preempt/rcu users must observe !cpu_active().
405 */
Oleg Nesterov7eeb0882015-06-30 03:29:51 +0200406 err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
Rusty Russell04321582008-07-28 12:16:29 -0500407 if (err) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 /* CPU didn't die: tell everyone. Can't complain. */
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700409 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
Thomas Gleixnera8994182015-07-05 17:12:30 +0000410 irq_unlock_sparse();
Oleg Nesterov6a1bdc12010-03-15 10:10:23 +0100411 goto out_release;
Satoru Takeuchi8fa1d7d2006-10-28 10:38:57 -0700412 }
Rusty Russell04321582008-07-28 12:16:29 -0500413 BUG_ON(cpu_online(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414
Peter Zijlstra48c5cca2010-11-13 19:32:29 +0100415 /*
416 * The migration_call() CPU_DYING callback will have removed all
417 * runnable tasks from the cpu, there's only the idle task left now
418 * that the migration thread is done doing the stop_machine thing.
Peter Zijlstra51a96c72010-11-19 20:37:53 +0100419 *
420 * Wait for the stop thread to go away.
Peter Zijlstra48c5cca2010-11-13 19:32:29 +0100421 */
Paul E. McKenney528a25b2015-01-28 14:09:43 -0800422 while (!per_cpu(cpu_dead_idle, cpu))
Peter Zijlstra51a96c72010-11-19 20:37:53 +0100423 cpu_relax();
Paul E. McKenney528a25b2015-01-28 14:09:43 -0800424 smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
425 per_cpu(cpu_dead_idle, cpu) = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426
Thomas Gleixnera8994182015-07-05 17:12:30 +0000427 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
428 irq_unlock_sparse();
429
Preeti U Murthy345527b2015-03-30 14:59:19 +0530430 hotplug_cpu__broadcast_tick_pull(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431 /* This actually kills the CPU. */
432 __cpu_die(cpu);
433
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434 /* CPU is completely dead: tell everyone. Too late to complain. */
Thomas Gleixnera49b1162015-04-03 02:38:05 +0200435 tick_cleanup_dead_cpu(cpu);
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700436 cpu_notify_nofail(CPU_DEAD | mod, hcpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700437
438 check_for_tasks(cpu);
439
Gautham R Shenoybaaca492007-05-09 02:34:03 -0700440out_release:
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100441 cpu_hotplug_done();
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700442 if (!err)
443 cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
Thomas Gleixner892d9882018-11-25 19:33:39 +0100444 arch_smt_update();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700445 return err;
446}
447
Mathias Krause71cf5ae2015-07-19 20:06:22 +0200448int cpu_down(unsigned int cpu)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700449{
Heiko Carstens9ea09af2008-12-22 12:36:30 +0100450 int err;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700451
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100452 cpu_maps_update_begin();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700453
Max Krasnyanskye761b772008-07-15 04:43:49 -0700454 if (cpu_hotplug_disabled) {
455 err = -EBUSY;
456 goto out;
457 }
458
Max Krasnyanskye761b772008-07-15 04:43:49 -0700459 err = _cpu_down(cpu, 0);
460
Max Krasnyanskye761b772008-07-15 04:43:49 -0700461out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100462 cpu_maps_update_done();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463 return err;
464}
Zhang Ruib62b8ef2008-04-29 02:35:56 -0400465EXPORT_SYMBOL(cpu_down);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);

	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();
	arch_smt_update();
	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
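
/*
 * Example (illustrative sketch added for exposition, not part of the kernel
 * sources): cpu_up() and cpu_down() take cpu_add_remove_lock themselves, so
 * in-kernel callers simply pass a CPU number and check the result:
 *
 *	int err = cpu_down(1);
 *
 *	if (err)
 *		pr_err("failed to offline CPU1: %d\n", err);
 *
 * Both return -EBUSY while cpu_hotplug_disabled is set (e.g. during
 * suspend/resume, see disable_nonboot_cpus() below).
 */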

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700595 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
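
/*
 * Example (illustrative sketch added for exposition, not part of the kernel
 * sources): cpumask_of() in <linux/cpumask.h> turns this table into a
 * constant single-bit mask by doing, roughly:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row 1 + (cpu % BITS_PER_LONG) has that bit set in its first word, and
 * stepping the pointer back by cpu / BITS_PER_LONG words (possible because
 * row 0 is empty) places the set bit at the right word index of the mask.
 */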

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}

enum cpu_mitigations cpu_mitigations = CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);