/*
 * ftrace: timestamp syncing, prepare
 * [linux-2.6.git] / kernel / trace / trace_sched_switch.c
 */
1 /*
2  * trace context switch
3  *
4  * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/debugfs.h>
10 #include <linux/kallsyms.h>
11 #include <linux/uaccess.h>
12 #include <linux/marker.h>
13 #include <linux/ftrace.h>
14
15 #include "trace.h"
16
/* trace_array this tracer records into; set by sched_switch_trace_init() */
static struct trace_array       *ctx_trace;
/* non-zero while the tracer is recording; checked on every switch probe */
static int __read_mostly        tracer_enabled;
19
/*
 * Record one context switch (prev -> next) into the trace buffer.
 *
 * Interrupts are disabled across the write and data->disabled acts as
 * a per-cpu nesting counter: the entry is only written when we are the
 * first user on this CPU (counter went 0 -> 1), so a re-entrant call
 * on the same CPU cannot corrupt the buffer.
 */
static void notrace
ctx_switch_func(struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        /* irqs off so the cpu (and data) we picked stays ours */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        /* only record when we are the sole active user on this CPU */
        if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
43
/*
 * Context-switch entry point for the tracing core: record the outgoing
 * task's cmdline, then hand the event to the sched_switch tracer and
 * chain on to the wakeup tracer.
 */
void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
{
        /* remember prev's comm so output can map its pid to a name */
        tracing_record_cmdline(prev);

        /*
         * Record the switch with the local tracer (bails out early
         * unless tracer_enabled is set):
         */
        ctx_switch_func(prev, next);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_switch(prev, next);
}
59
/*
 * Restart the trace: take a fresh start timestamp and clear the
 * per-cpu buffer of every online CPU.
 */
static notrace void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}
69
/*
 * Begin tracing.  The buffers must be reset *before* tracer_enabled
 * is set, so no stale entries from a previous run survive.
 */
static notrace void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracer_enabled = 1;
}
75
/*
 * Stop recording: probes check tracer_enabled and bail out.
 * @tr is unused but kept for the common tracer callback shape.
 */
static notrace void stop_sched_trace(struct trace_array *tr)
{
        tracer_enabled = 0;
}
80
81 static notrace void sched_switch_trace_init(struct trace_array *tr)
82 {
83         ctx_trace = tr;
84
85         if (tr->ctrl)
86                 start_sched_trace(tr);
87 }
88
89 static notrace void sched_switch_trace_reset(struct trace_array *tr)
90 {
91         if (tr->ctrl)
92                 stop_sched_trace(tr);
93 }
94
95 static void sched_switch_trace_ctrl_update(struct trace_array *tr)
96 {
97         /* When starting a new trace, reset the buffers */
98         if (tr->ctrl)
99                 start_sched_trace(tr);
100         else
101                 stop_sched_trace(tr);
102 }
103
/* The sched_switch tracer, registered with the trace core at boot. */
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_sched_switch,
#endif
};
114
115 __init static int init_sched_switch_trace(void)
116 {
117         return register_tracer(&sched_switch_trace);
118 }
119 device_initcall(init_sched_switch_trace);