ftrace: trace preempt off critical timings
kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array               *irqsoff_trace __read_mostly;
static int                              tracer_enabled __read_mostly;

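/*
 * Per-CPU flag, set while this CPU is inside a critical section
 * that is being timed:
 */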
static DEFINE_PER_CPU(int, tracing_cpu);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

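/*
 * Bitmask of the critical-section types this tracer instance times:
 * TRACER_IRQS_OFF, TRACER_PREEMPT_OFF, or both (preemptirqsoff):
 */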
static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int notrace
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int notrace
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * hit a new maximum in the meantime and could have disturbed our
 * measurement with serial console printouts, etc. Truly coinciding
 * maximum latencies should be rare and what happens together happens
 * separately as well, so this doesn't decrease the validity of the
 * maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void notrace
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

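        /*
         * Only record while this CPU is inside a timed critical
         * section (see start/stop_critical_timing below):
         */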
        if (likely(!__get_cpu_var(tracing_cpu)))
                return;

        local_save_flags(flags);

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
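        /*
         * The disabled count doubles as a recursion guard: record only
         * when we are the first (and only) level on this CPU:
         */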
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                ftrace(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
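        /*
         * In threshold mode, report every section longer than
         * tracing_thresh; otherwise report only new maximum latencies:
         */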
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

static void notrace
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        unsigned long latency, t0, t1;
        cycle_t T0, T1, T2, delta;
        unsigned long flags;

        /*
         * usecs conversion is slow so we try to delay the conversion
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
        T1 = now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        if (!report_latency(delta))
                goto out;

        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
        /*
         * Re-read the timestamp, because recording the trace entry
         * above takes time itself. The delta can only get larger, and
         * that time was genuinely spent in the critical section, so it
         * is fair to include it in the reported latency:
         */
        T2 = now(cpu);

        delta = T2 - T0;

        latency = nsecs_to_usecs(delta);

        if (data->critical_sequence != max_sequence)
                goto out;

        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);

        data->critical_end = parent_ip;

        update_max_tr_single(tr, current, cpu);

        if (tracing_thresh)
                printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
                       "violates %lu us threshold.\n"
                       " => started at timestamp %lu: ",
                                current->comm, current->pid,
                                raw_smp_processor_id(),
                                latency, nsecs_to_usecs(tracing_thresh), t0);
        else
                printk(KERN_INFO "(%16s-%-5d|#%d):"
                       " new %lu us maximum-latency "
                       "critical section.\n => started at timestamp %lu: ",
                                current->comm, current->pid,
                                raw_smp_processor_id(),
                                latency, t0);

        print_symbol(KERN_CONT "<%s>\n", data->critical_start);
        printk(KERN_CONT " =>   ended at timestamp %lu: ", t1);
        print_symbol(KERN_CONT "<%s>\n", data->critical_end);
        dump_stack();
        t1 = nsecs_to_usecs(now(cpu));
        printk(KERN_CONT " =>   dump-end timestamp %lu\n\n", t1);

        max_sequence++;

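        /* Reported or not, re-arm the measurement for this CPU: */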
out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = now(cpu);
        tracing_reset(data);
        ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
}

static inline void notrace
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        if (__get_cpu_var(tracing_cpu))
                return;

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (unlikely(!data) || unlikely(!data->trace) ||
            atomic_read(&data->disabled))
                return;

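        /*
         * Hold off the function-trace callback while we reset and
         * seed this CPU's buffer:
         */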
        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = now(cpu);
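        /*
         * Prefer the caller's address as the start of the critical
         * section; "a ? : b" is GCC shorthand for "a ? a : b":
         */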
        data->critical_start = parent_ip ? : ip;
        tracing_reset(data);

        local_save_flags(flags);

        ftrace(tr, data, ip, parent_ip, flags);

        __get_cpu_var(tracing_cpu) = 1;

        atomic_dec(&data->disabled);
}

static inline void notrace
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(__get_cpu_var(tracing_cpu)))
                __get_cpu_var(tracing_cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (unlikely(!data) || unlikely(!data->trace) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);
        local_save_flags(flags);
        ftrace(tr, data, ip, parent_ip, flags);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude code such as the idle loop from the measurement */
void notrace start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

void notrace stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
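/*
 * With lockdep (PROVE_LOCKING) the trace_hardirqs_*() entry points
 * live in lockdep, which calls back into these timing hooks instead:
 */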
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void notrace trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void notrace trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
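/*
 * Called from the preempt_count accounting code when preemption is
 * re-enabled (on) and disabled (off):
 */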
void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
{
        stop_critical_timing(a0, a1);
}

void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
{
        start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

static void start_irqsoff_tracer(struct trace_array *tr)
{
        tracer_enabled = 1;
        register_ftrace_function(&trace_ops);
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
        unregister_ftrace_function(&trace_ops);
        tracer_enabled = 0;
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
        irqsoff_trace = tr;
        /* make sure that irqsoff_trace is visible before we enable the tracer */
        smp_wmb();

        if (tr->ctrl)
                start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
        if (tr->ctrl)
                start_irqsoff_tracer(tr);
        else
                stop_irqsoff_tracer(tr);
}

static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
{
        /* stop the trace while dumping */
        if (iter->tr->ctrl)
                stop_irqsoff_tracer(iter->tr);
}

static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
{
        if (iter->tr->ctrl)
                start_irqsoff_tracer(iter->tr);
}

#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
        .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
        .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .open           = irqsoff_tracer_open,
        .close          = irqsoff_tracer_close,
        .ctrl_update    = irqsoff_tracer_ctrl_update,
        .print_max      = 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

static int __init init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);