/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

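/*
 * Recursion guard: only the outermost enter/exit pair on this CPU may
 * perform the state transition. A nested call leaves the counter above
 * one and is rejected; a counter below one means unbalanced calls and
 * triggers a one-time warning.
 */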
static bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}

/**
 * context_tracking_enter - Inform the context tracking that the CPU is going
 *                          to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed that the remaining kernel
 * instructions to execute won't use any RCU read side critical section,
 * because this function sets RCU in an extended quiescent state.
 */
void context_tracking_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (!context_tracking_recursion_enter())
		goto out_irq_restore;

	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				trace_user_enter(0);
				vtime_user_enter(current);
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the exception
		 * handler and then migrates to another CPU, that new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	context_tracking_recursion_exit();
out_irq_restore:
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);

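/*
 * context_tracking_user_enter() and context_tracking_user_exit() below
 * are thin out-of-line wrappers around user_enter()/user_exit(), giving
 * architecture entry code a real symbol to call. Both are blacklisted
 * from kprobes (NOKPROBE_SYMBOL) since a breakpoint inside the context
 * transition could fire while RCU is not watching.
 */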
void context_tracking_user_enter(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(context_tracking_user_enter);

/**
 * context_tracking_exit - Inform the context tracking that the CPU is
 *                         exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space, and before any use of an RCU read side critical section.
 * This potentially includes any high level kernel code like syscalls,
 * exceptions, signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (!context_tracking_recursion_enter())
		goto out_irq_restore;

	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				vtime_user_exit(current);
				trace_user_exit(0);
			}
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	context_tracking_recursion_exit();
out_irq_restore:
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);

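/*
 * Illustrative sketch, not part of the original file: exception handlers
 * typically reach context_tracking_exit()/context_tracking_enter() via
 * the exception_enter()/exception_exit() helpers from
 * <linux/context_tracking.h>, which save and restore the previous state
 * so that handlers nest safely (handle_fault() below is hypothetical):
 *
 *	enum ctx_state prev_state = exception_enter();
 *	handle_fault();			// RCU read side sections are safe here
 *	exception_exit(prev_state);	// re-enter user mode only if we came from it
 */
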
void context_tracking_user_exit(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(context_tracking_user_exit);

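/**
 * context_tracking_cpu_set - enable context tracking on a CPU
 * @cpu: the CPU to track
 *
 * Mark @cpu as active for context tracking and bump the static key
 * guarding the probe points. On the very first call, also set TIF_NOHZ
 * on init so the flag propagates to every task through fork.
 */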
void __init context_tracking_cpu_set(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_key_slow_inc(&context_tracking_enabled);
	}

	if (initialized)
		return;

	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork.
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}

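/*
 * CONFIG_CONTEXT_TRACKING_FORCE activates context tracking on every CPU
 * at boot, mainly useful for exercising this code without a full
 * NO_HZ_FULL configuration.
 */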
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif