/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

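/*
 * Per-cpu flag, set while the current CPU is inside a critical section
 * that is being timed; used to ignore nested starts and to keep the
 * function tracer callback cheap on CPUs that are not tracing.
 */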
static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

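/*
 * Which events delimit the timed section: irqs-off, preempt-off, or
 * both (set by the irqsoff, preemptoff and preemptirqsoff tracers).
 */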
static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int notrace
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int notrace
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void notrace
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

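/*
 * Called when a critical section ends: compute how long irqs and/or
 * preemption were disabled (now(cpu) - preempt_timestamp) and, if the
 * latency qualifies, record it as the new maximum trace and print a
 * summary. max_trace_lock serializes CPUs that hit a maximum at the
 * same time.
 */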
static void notrace
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, delta;
	unsigned long flags;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	if (!report_latency(delta))
		goto out;

	spin_lock(&max_trace_lock);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	if (tracing_thresh)
		printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
		       "violates %lu us threshold.\n"
		       " => started at timestamp %lu: ",
				current->comm, current->pid,
				raw_smp_processor_id(),
				latency, nsecs_to_usecs(tracing_thresh), t0);
	else
		printk(KERN_INFO "(%16s-%-5d|#%d):"
		       " new %lu us maximum-latency "
		       "critical section.\n => started at timestamp %lu: ",
				current->comm, current->pid,
				raw_smp_processor_id(),
				latency, t0);

	print_symbol(KERN_CONT "<%s>\n", data->critical_start);
	printk(KERN_CONT " => ended at timestamp %lu: ", t1);
	print_symbol(KERN_CONT "<%s>\n", data->critical_end);
	dump_stack();
	t1 = nsecs_to_usecs(now(cpu));
	printk(KERN_CONT " => dump-end timestamp %lu\n\n", t1);

	max_sequence++;

out_unlock:
	spin_unlock(&max_trace_lock);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
	tracing_reset(data);
	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
}

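/*
 * Mark the start of a critical section on this CPU: remember the
 * timestamp and start address, reset the per-cpu trace buffer, log the
 * first entry and set tracing_cpu so nested starts are ignored until
 * the matching stop.
 */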
static inline void notrace
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	if (__get_cpu_var(tracing_cpu))
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!data->trace) ||
	    atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
	data->critical_start = parent_ip ? : ip;
	tracing_reset(data);

	local_save_flags(flags);

	ftrace(tr, data, ip, parent_ip, flags);

	__get_cpu_var(tracing_cpu) = 1;

	atomic_dec(&data->disabled);
}

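/*
 * Mark the end of a critical section: clear tracing_cpu, log the final
 * entry and let check_critical_timing() decide whether this section is
 * a new maximum worth reporting.
 */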
static inline void notrace
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(__get_cpu_var(tracing_cpu)))
		__get_cpu_var(tracing_cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!data->trace) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);
	local_save_flags(flags);
	ftrace(tr, data, ip, parent_ip, flags);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings; used to pause the timing (e.g. in idle) */
void notrace start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

void notrace stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
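/*
 * With lockdep (CONFIG_PROVE_LOCKING) enabled, the trace_hardirqs_*()
 * entry points are provided elsewhere and call into these
 * time_hardirqs_*() hooks; otherwise this file defines the
 * trace_hardirqs_*() functions itself in the #else branch below.
 */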
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void notrace trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void notrace trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

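/*
 * Preemption tracer hooks: trace_preempt_off() is called when preemption
 * gets disabled, trace_preempt_on() when it is re-enabled again.
 */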
#ifdef CONFIG_PREEMPT_TRACER
void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
{
	stop_critical_timing(a0, a1);
}

void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
{
	start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

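/*
 * Hook/unhook the function tracer and flip tracer_enabled, which gates
 * start/stop_critical_timing().
 */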
static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	tracer_enabled = 1;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	if (tr->ctrl)
		start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_irqsoff_tracer(tr);
	else
		stop_irqsoff_tracer(tr);
}

static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_irqsoff_tracer(iter->tr);
}

static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
{
	if (iter->tr->ctrl)
		start_irqsoff_tracer(iter->tr);
}

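/*
 * Three flavours of this tracer can be registered, depending on the
 * config: irqsoff, preemptoff and preemptirqsoff. They differ only in
 * the trace_type bits they set before calling __irqsoff_tracer_init().
 */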
#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

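/* Register whichever of the tracer flavours the config enables. */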
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);