/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
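
/*
 * Example usage via the tracefs/debugfs interface (the mount point is
 * assumed to be /sys/kernel/debug/tracing and may differ):
 *
 *   echo 0 > tracing_max_latency
 *   echo irqsoff > current_tracer
 *   echo 1 > tracing_on
 *   ... run the workload ...
 *   echo 0 > tracing_on
 *   cat trace
 *
 * The preemptoff and preemptirqsoff tracers registered below are driven
 * the same way.
 */
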
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
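/*
 * Handler for the "display-graph" tracer option: flipping it stops the
 * tracer in its current mode, clears the per-cpu tracing state and the
 * recorded max latency, and restarts the tracer in the requested mode.
 */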
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(irqsoff_trace);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

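/*
 * Entry/return callbacks for the function graph tracer. They rely on
 * the same func_prolog_dec() fast path as irqsoff_tracer_call() above,
 * so only the CPU that is inside a traced critical section records
 * graph events.
 */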
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

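/*
 * Record a function event either through the graph tracer or as a
 * plain TRACE_FN entry, depending on the display-graph option.
 */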
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

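/*
 * Called at the end of a critical section: compute how long irqs
 * and/or preemption were disabled and, if the delta qualifies per
 * report_latency() above, record it as the new maximum while holding
 * max_trace_lock.
 */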
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

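/*
 * Mark the start of an irqs/preempt-off critical section on this CPU:
 * record the timestamp and start address, emit a trace entry, and set
 * the per-cpu tracing_cpu flag so the function callbacks only trace
 * this CPU until the section ends.
 */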
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

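/*
 * Mark the end of the critical section: clear the per-cpu tracing_cpu
 * flag, emit the closing trace entry, and let check_critical_timing()
 * decide whether this section is a new maximum.
 */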
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings used for stoppage (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

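/*
 * Register either the plain function callback or the graph callbacks,
 * depending on the requested mode, and mark the tracer enabled only
 * when registration succeeds and tracing is globally enabled.
 */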
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret = 0;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

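/*
 * Common initialization for the irqsoff, preemptoff and preemptirqsoff
 * variants: force the latency trace format, reset the recorded maximum
 * and the per-cpu buffers, publish irqsoff_trace, and start the tracer.
 */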
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr, is_graph());

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

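/*
 * Register whichever tracer variants the kernel configuration enables;
 * the register_* macros above expand to no-ops for variants that are
 * configured out.
 */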
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);