/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

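/*
 * Allocate a per-instance ftrace_ops and attach it to the trace_array.
 * The top level (global) trace_array uses the static trace_ops instead.
 */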
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/* The top level array uses the "global_ops". */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
		ret = allocate_ftrace_ops(tr);
		if (ret)
			return ret;
	}

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

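/*
 * Tracer init callback: pick the ftrace_ops to use (the static global ops
 * for the top level trace_array, or the ops allocated at instance
 * creation) and start function tracing.
 */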
static int function_trace_init(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		/* There's only one global tr */
		if (!trace_ops.private) {
			trace_ops.private = tr;
			trace_stack_ops.private = tr;
		}

		if (func_flags.val & TRACE_FUNC_OPT_STACK)
			ops = &trace_stack_ops;
		else
			ops = &trace_ops;
		tr->ops = ops;
	} else if (!tr->ops) {
		/*
		 * Instance trace_arrays get their ops allocated at
		 * instance creation, unless that allocation failed.
		 */
		return -ENOMEM;
	}

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

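/*
 * Per-function callback for the plain function tracer.  Records the
 * function and its parent, guarded by the ftrace recursion protection
 * and the per-cpu "disabled" counter.
 */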
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

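/*
 * Per-function callback used when the func_stack_trace option is set:
 * records the function event followed by a stack trace, with interrupts
 * disabled and the per-cpu "disabled" counter held.
 */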
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

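/*
 * Keep tr->function_enabled clear while registering the ftrace_ops so the
 * callbacks ignore events until registration has completed.
 */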
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

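/*
 * Handle toggling of the func_stack_trace option: switch tr->ops between
 * the plain and stack-tracing ftrace_ops and re-register the callback.
 */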
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops = &trace_stack_ops;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops = &trace_ops;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static int update_count(void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

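/*
 * Probe callbacks for the traceon/traceoff function triggers.  The _count
 * variants only fire while update_count() reports uses remaining.
 */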
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	if (update_count(data))
		tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		trace_dump_stack(STACK_SKIP);
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

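/*
 * Common handler for the probe commands above: a leading '!' in the glob
 * unregisters the probe, and an optional ":N" parameter is parsed as the
 * trigger count passed to register_ftrace_function_probe().
 */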
static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);