blob: 6ff07ad0ede30343b454da332d41697610b8c304 [file] [log] [blame]
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02001/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
Steven Rostedt3d083392008-05-12 21:20:42 +020016#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020019#include <linux/seq_file.h>
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -080020#include <linux/suspend.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020021#include <linux/debugfs.h>
Steven Rostedt3d083392008-05-12 21:20:42 +020022#include <linux/hardirq.h>
Ingo Molnar2d8b8202008-02-23 16:55:50 +010023#include <linux/kthread.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020024#include <linux/uaccess.h>
Steven Rostedt5855fea2011-12-16 19:27:42 -050025#include <linux/bsearch.h>
Paul Gortmaker56d82e02011-05-26 17:53:52 -040026#include <linux/module.h>
Ingo Molnar2d8b8202008-02-23 16:55:50 +010027#include <linux/ftrace.h>
Steven Rostedtb0fc4942008-05-12 21:20:43 +020028#include <linux/sysctl.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090029#include <linux/slab.h>
Steven Rostedt5072c592008-05-12 21:20:43 +020030#include <linux/ctype.h>
Steven Rostedt68950612011-12-16 17:06:45 -050031#include <linux/sort.h>
Steven Rostedt3d083392008-05-12 21:20:42 +020032#include <linux/list.h>
Steven Rostedt59df055f2009-02-14 15:29:06 -050033#include <linux/hash.h>
Paul E. McKenney3f379b02010-03-05 15:03:25 -080034#include <linux/rcupdate.h>
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020035
Steven Rostedtad8d75f2009-04-14 19:39:12 -040036#include <trace/events/sched.h>
Steven Rostedt8aef2d22009-03-24 01:10:15 -040037
Steven Rostedt2af15d62009-05-28 13:37:24 -040038#include <asm/setup.h>
Abhishek Sagar395a59d2008-06-21 23:47:27 +053039
Steven Rostedt0706f1c2009-03-23 23:12:58 -040040#include "trace_output.h"
Steven Rostedtbac429f2009-03-20 12:50:56 -040041#include "trace_stat.h"
Steven Rostedt3d083392008-05-12 21:20:42 +020042
/*
 * If @cond is true something inside ftrace itself has gone wrong:
 * warn (once, for the _ONCE variant) and permanently shut tracing
 * down via ftrace_kill() so the anomaly cannot do further damage.
 * Both macros evaluate to the integer value of @cond so they can be
 * used directly inside an if () condition.
 */
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
Steven Rostedt69128962008-10-23 09:33:03 -040058
Steven Rostedt8fc0c702009-02-16 15:28:00 -050059/* hash bits for specific function selection */
60#define FTRACE_HASH_BITS 7
61#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
Steven Rostedt33dc9b12011-05-02 17:34:47 -040062#define FTRACE_HASH_DEFAULT_BITS 10
63#define FTRACE_HASH_MAX_BITS 12
Steven Rostedt8fc0c702009-02-16 15:28:00 -050064
Jiri Olsae2484912012-02-15 15:51:48 +010065#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -040067static struct ftrace_ops ftrace_list_end __read_mostly = {
68 .func = ftrace_stub,
69};
70
Steven Rostedt4eebcc82008-05-12 21:20:48 +020071/* ftrace_enabled is a method to turn ftrace on or off */
72int ftrace_enabled __read_mostly;
Steven Rostedtd61f82d2008-05-12 21:20:43 +020073static int last_ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +020074
Steven Rostedt60a7ecf2008-11-05 16:05:44 -050075/* Quick disabling of function tracer. */
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -040076int function_trace_stop __read_mostly;
77
78/* Current function tracing op */
79struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
Steven Rostedt60a7ecf2008-11-05 16:05:44 -050080
jolsa@redhat.com756d17e2009-10-13 16:33:52 -040081/* List for set_ftrace_pid's pids. */
82LIST_HEAD(ftrace_pids);
83struct ftrace_pid {
84 struct list_head list;
85 struct pid *pid;
86};
87
Steven Rostedt4eebcc82008-05-12 21:20:48 +020088/*
89 * ftrace_disabled is set when an anomaly is discovered.
90 * ftrace_disabled is much stronger than ftrace_enabled.
91 */
92static int ftrace_disabled __read_mostly;
93
Steven Rostedt52baf112009-02-14 01:15:39 -050094static DEFINE_MUTEX(ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +020095
Steven Rostedtb8489142011-05-04 09:27:52 -040096static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
Jiri Olsae2484912012-02-15 15:51:48 +010097static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
Steven Rostedtb8489142011-05-04 09:27:52 -040098static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +020099ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500100ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
Steven Rostedt2b499382011-05-03 22:49:52 -0400101static struct ftrace_ops global_ops;
Jiri Olsae2484912012-02-15 15:51:48 +0100102static struct ftrace_ops control_ops;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200103
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400104#if ARCH_SUPPORTS_FTRACE_OPS
105static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -0400106 struct ftrace_ops *op, struct pt_regs *regs);
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400107#else
108/* See comment below, where ftrace_ops_list_func is defined */
109static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
110#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
111#endif
Steven Rostedtb8489142011-05-04 09:27:52 -0400112
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800113/*
Steven Rostedtb8489142011-05-04 09:27:52 -0400114 * Traverse the ftrace_global_list, invoking all entries. The reason that we
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800115 * can use rcu_dereference_raw() is that elements removed from this list
116 * are simply leaked, so there is no need to interact with a grace-period
117 * mechanism. The rcu_dereference_raw() calls are needed to handle
Steven Rostedtb8489142011-05-04 09:27:52 -0400118 * concurrent insertions into the ftrace_global_list.
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800119 *
120 * Silly Alpha and silly pointer-speculation compiler optimizations!
121 */
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400122static void
123ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -0400124 struct ftrace_ops *op, struct pt_regs *regs)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200125{
Steven Rostedtb1cff0a2011-05-25 14:27:43 -0400126 if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
127 return;
128
129 trace_recursion_set(TRACE_GLOBAL_BIT);
130 op = rcu_dereference_raw(ftrace_global_list); /*see above*/
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200131 while (op != &ftrace_list_end) {
Steven Rostedta1e2e312011-08-09 12:50:46 -0400132 op->func(ip, parent_ip, op, regs);
Paul E. McKenney3f379b02010-03-05 15:03:25 -0800133 op = rcu_dereference_raw(op->next); /*see above*/
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200134 };
Steven Rostedtb1cff0a2011-05-25 14:27:43 -0400135 trace_recursion_clear(TRACE_GLOBAL_BIT);
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200136}
137
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400138static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -0400139 struct ftrace_ops *op, struct pt_regs *regs)
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500140{
Steven Rostedt0ef8cde2008-12-03 15:36:58 -0500141 if (!test_tsk_trace_trace(current))
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500142 return;
143
Steven Rostedta1e2e312011-08-09 12:50:46 -0400144 ftrace_pid_function(ip, parent_ip, op, regs);
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500145}
146
147static void set_ftrace_pid_function(ftrace_func_t func)
148{
149 /* do not set ftrace_pid_function to itself! */
150 if (func != ftrace_pid_func)
151 ftrace_pid_function = func;
152}
153
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200154/**
Steven Rostedt3d083392008-05-12 21:20:42 +0200155 * clear_ftrace_function - reset the ftrace function
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200156 *
Steven Rostedt3d083392008-05-12 21:20:42 +0200157 * This NULLs the ftrace function and in essence stops
158 * tracing. There may be lag
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200159 */
Steven Rostedt3d083392008-05-12 21:20:42 +0200160void clear_ftrace_function(void)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200161{
Steven Rostedt3d083392008-05-12 21:20:42 +0200162 ftrace_trace_function = ftrace_stub;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500163 ftrace_pid_function = ftrace_stub;
Steven Rostedt3d083392008-05-12 21:20:42 +0200164}
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200165
Jiri Olsae2484912012-02-15 15:51:48 +0100166static void control_ops_disable_all(struct ftrace_ops *ops)
167{
168 int cpu;
169
170 for_each_possible_cpu(cpu)
171 *per_cpu_ptr(ops->disabled, cpu) = 1;
172}
173
174static int control_ops_alloc(struct ftrace_ops *ops)
175{
176 int __percpu *disabled;
177
178 disabled = alloc_percpu(int);
179 if (!disabled)
180 return -ENOMEM;
181
182 ops->disabled = disabled;
183 control_ops_disable_all(ops);
184 return 0;
185}
186
187static void control_ops_free(struct ftrace_ops *ops)
188{
189 free_percpu(ops->disabled);
190}
191
Steven Rostedt2b499382011-05-03 22:49:52 -0400192static void update_global_ops(void)
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400193{
194 ftrace_func_t func;
195
196 /*
197 * If there's only one function registered, then call that
198 * function directly. Otherwise, we need to iterate over the
199 * registered callers.
200 */
Steven Rostedtb8489142011-05-04 09:27:52 -0400201 if (ftrace_global_list == &ftrace_list_end ||
202 ftrace_global_list->next == &ftrace_list_end)
203 func = ftrace_global_list->func;
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400204 else
Steven Rostedtb8489142011-05-04 09:27:52 -0400205 func = ftrace_global_list_func;
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400206
207 /* If we filter on pids, update to use the pid function */
208 if (!list_empty(&ftrace_pids)) {
209 set_ftrace_pid_function(func);
210 func = ftrace_pid_func;
211 }
Steven Rostedt2b499382011-05-03 22:49:52 -0400212
213 global_ops.func = func;
214}
215
216static void update_ftrace_function(void)
217{
218 ftrace_func_t func;
219
220 update_global_ops();
221
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400222 /*
223 * If we are at the end of the list and this ops is
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400224 * not dynamic and the arch supports passing ops, then have the
225 * mcount trampoline call the function directly.
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400226 */
Steven Rostedtb8489142011-05-04 09:27:52 -0400227 if (ftrace_ops_list == &ftrace_list_end ||
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400228 (ftrace_ops_list->next == &ftrace_list_end &&
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400229 !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
Steven Rostedtccf36722012-06-05 09:44:25 -0400230 !FTRACE_FORCE_LIST_FUNC)) {
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400231 /* Set the ftrace_ops that the arch callback uses */
232 if (ftrace_ops_list == &global_ops)
233 function_trace_op = ftrace_global_list;
234 else
235 function_trace_op = ftrace_ops_list;
Steven Rostedtb8489142011-05-04 09:27:52 -0400236 func = ftrace_ops_list->func;
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400237 } else {
238 /* Just use the default ftrace_ops */
239 function_trace_op = &ftrace_list_end;
Steven Rostedtb8489142011-05-04 09:27:52 -0400240 func = ftrace_ops_list_func;
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -0400241 }
Steven Rostedt2b499382011-05-03 22:49:52 -0400242
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400243 ftrace_trace_function = func;
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400244}
245
Steven Rostedt2b499382011-05-03 22:49:52 -0400246static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
Steven Rostedt3d083392008-05-12 21:20:42 +0200247{
Steven Rostedt2b499382011-05-03 22:49:52 -0400248 ops->next = *list;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200249 /*
Steven Rostedtb8489142011-05-04 09:27:52 -0400250 * We are entering ops into the list but another
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200251 * CPU might be walking that list. We need to make sure
252 * the ops->next pointer is valid before another CPU sees
Steven Rostedtb8489142011-05-04 09:27:52 -0400253 * the ops pointer included into the list.
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200254 */
Steven Rostedt2b499382011-05-03 22:49:52 -0400255 rcu_assign_pointer(*list, ops);
256}
Steven Rostedt3d083392008-05-12 21:20:42 +0200257
Steven Rostedt2b499382011-05-03 22:49:52 -0400258static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
259{
260 struct ftrace_ops **p;
261
262 /*
263 * If we are removing the last function, then simply point
264 * to the ftrace_stub.
265 */
266 if (*list == ops && ops->next == &ftrace_list_end) {
267 *list = &ftrace_list_end;
268 return 0;
269 }
270
271 for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
272 if (*p == ops)
273 break;
274
275 if (*p != ops)
276 return -1;
277
278 *p = (*p)->next;
279 return 0;
280}
281
Jiri Olsae2484912012-02-15 15:51:48 +0100282static void add_ftrace_list_ops(struct ftrace_ops **list,
283 struct ftrace_ops *main_ops,
284 struct ftrace_ops *ops)
285{
286 int first = *list == &ftrace_list_end;
287 add_ftrace_ops(list, ops);
288 if (first)
289 add_ftrace_ops(&ftrace_ops_list, main_ops);
290}
291
292static int remove_ftrace_list_ops(struct ftrace_ops **list,
293 struct ftrace_ops *main_ops,
294 struct ftrace_ops *ops)
295{
296 int ret = remove_ftrace_ops(list, ops);
297 if (!ret && *list == &ftrace_list_end)
298 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
299 return ret;
300}
301
Steven Rostedt2b499382011-05-03 22:49:52 -0400302static int __register_ftrace_function(struct ftrace_ops *ops)
303{
Borislav Petkov8d240dd2012-03-29 19:11:40 +0200304 if (unlikely(ftrace_disabled))
Steven Rostedt2b499382011-05-03 22:49:52 -0400305 return -ENODEV;
306
307 if (FTRACE_WARN_ON(ops == &global_ops))
308 return -EINVAL;
309
Steven Rostedtb8489142011-05-04 09:27:52 -0400310 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
311 return -EBUSY;
312
Jiri Olsae2484912012-02-15 15:51:48 +0100313 /* We don't support both control and global flags set. */
314 if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
315 return -EINVAL;
316
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400317 if (!core_kernel_data((unsigned long)ops))
318 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
319
Steven Rostedtb8489142011-05-04 09:27:52 -0400320 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
Jiri Olsae2484912012-02-15 15:51:48 +0100321 add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
Steven Rostedtb8489142011-05-04 09:27:52 -0400322 ops->flags |= FTRACE_OPS_FL_ENABLED;
Jiri Olsae2484912012-02-15 15:51:48 +0100323 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
324 if (control_ops_alloc(ops))
325 return -ENOMEM;
326 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
Steven Rostedtb8489142011-05-04 09:27:52 -0400327 } else
328 add_ftrace_ops(&ftrace_ops_list, ops);
329
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400330 if (ftrace_enabled)
331 update_ftrace_function();
Steven Rostedt3d083392008-05-12 21:20:42 +0200332
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200333 return 0;
334}
335
Ingo Molnare309b412008-05-12 21:20:51 +0200336static int __unregister_ftrace_function(struct ftrace_ops *ops)
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200337{
Steven Rostedt2b499382011-05-03 22:49:52 -0400338 int ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200339
Steven Rostedt2b499382011-05-03 22:49:52 -0400340 if (ftrace_disabled)
341 return -ENODEV;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200342
Steven Rostedtb8489142011-05-04 09:27:52 -0400343 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
344 return -EBUSY;
345
Steven Rostedt2b499382011-05-03 22:49:52 -0400346 if (FTRACE_WARN_ON(ops == &global_ops))
347 return -EINVAL;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200348
Steven Rostedtb8489142011-05-04 09:27:52 -0400349 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
Jiri Olsae2484912012-02-15 15:51:48 +0100350 ret = remove_ftrace_list_ops(&ftrace_global_list,
351 &global_ops, ops);
Steven Rostedtb8489142011-05-04 09:27:52 -0400352 if (!ret)
353 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
Jiri Olsae2484912012-02-15 15:51:48 +0100354 } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
355 ret = remove_ftrace_list_ops(&ftrace_control_list,
356 &control_ops, ops);
357 if (!ret) {
358 /*
359 * The ftrace_ops is now removed from the list,
360 * so there'll be no new users. We must ensure
361 * all current users are done before we free
362 * the control data.
363 */
364 synchronize_sched();
365 control_ops_free(ops);
366 }
Steven Rostedtb8489142011-05-04 09:27:52 -0400367 } else
368 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
369
Steven Rostedt2b499382011-05-03 22:49:52 -0400370 if (ret < 0)
371 return ret;
Steven Rostedtb8489142011-05-04 09:27:52 -0400372
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400373 if (ftrace_enabled)
374 update_ftrace_function();
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +0200375
Steven Rostedtcdbe61b2011-05-05 21:14:55 -0400376 /*
377 * Dynamic ops may be freed, we must make sure that all
378 * callers are done before leaving this function.
379 */
380 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
381 synchronize_sched();
382
Steven Rostedte6ea44e2009-02-14 01:42:44 -0500383 return 0;
Steven Rostedt3d083392008-05-12 21:20:42 +0200384}
385
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500386static void ftrace_update_pid_func(void)
387{
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400388 /* Only do something if we are tracing something */
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500389 if (ftrace_trace_function == ftrace_stub)
KOSAKI Motohiro10dd3eb2009-03-06 15:29:04 +0900390 return;
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500391
Steven Rostedt491d0dc2011-04-27 21:43:36 -0400392 update_ftrace_function();
Steven Rostedtdf4fc312008-11-26 00:16:23 -0500393}
394
Steven Rostedt493762f2009-03-23 17:12:36 -0400395#ifdef CONFIG_FUNCTION_PROFILER
396struct ftrace_profile {
397 struct hlist_node node;
398 unsigned long ip;
399 unsigned long counter;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400400#ifdef CONFIG_FUNCTION_GRAPH_TRACER
401 unsigned long long time;
Chase Douglase330b3b2010-04-26 14:02:05 -0400402 unsigned long long time_squared;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400403#endif
Steven Rostedt493762f2009-03-23 17:12:36 -0400404};
405
406struct ftrace_profile_page {
407 struct ftrace_profile_page *next;
408 unsigned long index;
409 struct ftrace_profile records[];
410};
411
Steven Rostedtcafb1682009-03-24 20:50:39 -0400412struct ftrace_profile_stat {
413 atomic_t disabled;
414 struct hlist_head *hash;
415 struct ftrace_profile_page *pages;
416 struct ftrace_profile_page *start;
417 struct tracer_stat stat;
418};
419
Steven Rostedt493762f2009-03-23 17:12:36 -0400420#define PROFILE_RECORDS_SIZE \
421 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
422
423#define PROFILES_PER_PAGE \
424 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
425
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400426static int ftrace_profile_bits __read_mostly;
427static int ftrace_profile_enabled __read_mostly;
428
429/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
Steven Rostedt493762f2009-03-23 17:12:36 -0400430static DEFINE_MUTEX(ftrace_profile_lock);
431
Steven Rostedtcafb1682009-03-24 20:50:39 -0400432static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
Steven Rostedt493762f2009-03-23 17:12:36 -0400433
434#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
435
Steven Rostedt493762f2009-03-23 17:12:36 -0400436static void *
437function_stat_next(void *v, int idx)
438{
439 struct ftrace_profile *rec = v;
440 struct ftrace_profile_page *pg;
441
442 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
443
444 again:
Li Zefan0296e422009-06-26 11:15:37 +0800445 if (idx != 0)
446 rec++;
447
Steven Rostedt493762f2009-03-23 17:12:36 -0400448 if ((void *)rec >= (void *)&pg->records[pg->index]) {
449 pg = pg->next;
450 if (!pg)
451 return NULL;
452 rec = &pg->records[0];
453 if (!rec->counter)
454 goto again;
455 }
456
457 return rec;
458}
459
460static void *function_stat_start(struct tracer_stat *trace)
461{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400462 struct ftrace_profile_stat *stat =
463 container_of(trace, struct ftrace_profile_stat, stat);
464
465 if (!stat || !stat->start)
466 return NULL;
467
468 return function_stat_next(&stat->start->records[0], 0);
Steven Rostedt493762f2009-03-23 17:12:36 -0400469}
470
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400471#ifdef CONFIG_FUNCTION_GRAPH_TRACER
472/* function graph compares on total time */
473static int function_stat_cmp(void *p1, void *p2)
474{
475 struct ftrace_profile *a = p1;
476 struct ftrace_profile *b = p2;
477
478 if (a->time < b->time)
479 return -1;
480 if (a->time > b->time)
481 return 1;
482 else
483 return 0;
484}
485#else
486/* not function graph compares against hits */
Steven Rostedt493762f2009-03-23 17:12:36 -0400487static int function_stat_cmp(void *p1, void *p2)
488{
489 struct ftrace_profile *a = p1;
490 struct ftrace_profile *b = p2;
491
492 if (a->counter < b->counter)
493 return -1;
494 if (a->counter > b->counter)
495 return 1;
496 else
497 return 0;
498}
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400499#endif
Steven Rostedt493762f2009-03-23 17:12:36 -0400500
/* Emit the column headers for the per-function profile listing. */
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}
514
515static int function_stat_show(struct seq_file *m, void *v)
516{
517 struct ftrace_profile *rec = v;
518 char str[KSYM_SYMBOL_LEN];
Li Zefan3aaba202010-08-23 16:50:12 +0800519 int ret = 0;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400520#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Steven Rostedt34886c82009-03-25 21:00:47 -0400521 static struct trace_seq s;
522 unsigned long long avg;
Chase Douglase330b3b2010-04-26 14:02:05 -0400523 unsigned long long stddev;
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400524#endif
Li Zefan3aaba202010-08-23 16:50:12 +0800525 mutex_lock(&ftrace_profile_lock);
526
527 /* we raced with function_profile_reset() */
528 if (unlikely(rec->counter == 0)) {
529 ret = -EBUSY;
530 goto out;
531 }
Steven Rostedt493762f2009-03-23 17:12:36 -0400532
533 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400534 seq_printf(m, " %-30.30s %10lu", str, rec->counter);
Steven Rostedt493762f2009-03-23 17:12:36 -0400535
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400536#ifdef CONFIG_FUNCTION_GRAPH_TRACER
537 seq_printf(m, " ");
Steven Rostedt34886c82009-03-25 21:00:47 -0400538 avg = rec->time;
539 do_div(avg, rec->counter);
540
Chase Douglase330b3b2010-04-26 14:02:05 -0400541 /* Sample standard deviation (s^2) */
542 if (rec->counter <= 1)
543 stddev = 0;
544 else {
545 stddev = rec->time_squared - rec->counter * avg * avg;
546 /*
547 * Divide only 1000 for ns^2 -> us^2 conversion.
548 * trace_print_graph_duration will divide 1000 again.
549 */
550 do_div(stddev, (rec->counter - 1) * 1000);
551 }
552
Steven Rostedt34886c82009-03-25 21:00:47 -0400553 trace_seq_init(&s);
554 trace_print_graph_duration(rec->time, &s);
555 trace_seq_puts(&s, " ");
556 trace_print_graph_duration(avg, &s);
Chase Douglase330b3b2010-04-26 14:02:05 -0400557 trace_seq_puts(&s, " ");
558 trace_print_graph_duration(stddev, &s);
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400559 trace_print_seq(m, &s);
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400560#endif
561 seq_putc(m, '\n');
Li Zefan3aaba202010-08-23 16:50:12 +0800562out:
563 mutex_unlock(&ftrace_profile_lock);
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400564
Li Zefan3aaba202010-08-23 16:50:12 +0800565 return ret;
Steven Rostedt493762f2009-03-23 17:12:36 -0400566}
567
Steven Rostedtcafb1682009-03-24 20:50:39 -0400568static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
Steven Rostedt493762f2009-03-23 17:12:36 -0400569{
570 struct ftrace_profile_page *pg;
571
Steven Rostedtcafb1682009-03-24 20:50:39 -0400572 pg = stat->pages = stat->start;
Steven Rostedt493762f2009-03-23 17:12:36 -0400573
574 while (pg) {
575 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
576 pg->index = 0;
577 pg = pg->next;
578 }
579
Steven Rostedtcafb1682009-03-24 20:50:39 -0400580 memset(stat->hash, 0,
Steven Rostedt493762f2009-03-23 17:12:36 -0400581 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
582}
583
Steven Rostedtcafb1682009-03-24 20:50:39 -0400584int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
Steven Rostedt493762f2009-03-23 17:12:36 -0400585{
586 struct ftrace_profile_page *pg;
Steven Rostedt318e0a72009-03-25 20:06:34 -0400587 int functions;
588 int pages;
Steven Rostedt493762f2009-03-23 17:12:36 -0400589 int i;
590
591 /* If we already allocated, do nothing */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400592 if (stat->pages)
Steven Rostedt493762f2009-03-23 17:12:36 -0400593 return 0;
594
Steven Rostedtcafb1682009-03-24 20:50:39 -0400595 stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
596 if (!stat->pages)
Steven Rostedt493762f2009-03-23 17:12:36 -0400597 return -ENOMEM;
598
Steven Rostedt318e0a72009-03-25 20:06:34 -0400599#ifdef CONFIG_DYNAMIC_FTRACE
600 functions = ftrace_update_tot_cnt;
601#else
602 /*
603 * We do not know the number of functions that exist because
604 * dynamic tracing is what counts them. With past experience
605 * we have around 20K functions. That should be more than enough.
606 * It is highly unlikely we will execute every function in
607 * the kernel.
608 */
609 functions = 20000;
610#endif
611
Steven Rostedtcafb1682009-03-24 20:50:39 -0400612 pg = stat->start = stat->pages;
Steven Rostedt493762f2009-03-23 17:12:36 -0400613
Steven Rostedt318e0a72009-03-25 20:06:34 -0400614 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
615
616 for (i = 0; i < pages; i++) {
Steven Rostedt493762f2009-03-23 17:12:36 -0400617 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
Steven Rostedt493762f2009-03-23 17:12:36 -0400618 if (!pg->next)
Steven Rostedt318e0a72009-03-25 20:06:34 -0400619 goto out_free;
Steven Rostedt493762f2009-03-23 17:12:36 -0400620 pg = pg->next;
621 }
622
623 return 0;
Steven Rostedt318e0a72009-03-25 20:06:34 -0400624
625 out_free:
626 pg = stat->start;
627 while (pg) {
628 unsigned long tmp = (unsigned long)pg;
629
630 pg = pg->next;
631 free_page(tmp);
632 }
633
634 free_page((unsigned long)stat->pages);
635 stat->pages = NULL;
636 stat->start = NULL;
637
638 return -ENOMEM;
Steven Rostedt493762f2009-03-23 17:12:36 -0400639}
640
Steven Rostedtcafb1682009-03-24 20:50:39 -0400641static int ftrace_profile_init_cpu(int cpu)
Steven Rostedt493762f2009-03-23 17:12:36 -0400642{
Steven Rostedtcafb1682009-03-24 20:50:39 -0400643 struct ftrace_profile_stat *stat;
Steven Rostedt493762f2009-03-23 17:12:36 -0400644 int size;
645
Steven Rostedtcafb1682009-03-24 20:50:39 -0400646 stat = &per_cpu(ftrace_profile_stats, cpu);
647
648 if (stat->hash) {
Steven Rostedt493762f2009-03-23 17:12:36 -0400649 /* If the profile is already created, simply reset it */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400650 ftrace_profile_reset(stat);
Steven Rostedt493762f2009-03-23 17:12:36 -0400651 return 0;
652 }
653
654 /*
655 * We are profiling all functions, but usually only a few thousand
656 * functions are hit. We'll make a hash of 1024 items.
657 */
658 size = FTRACE_PROFILE_HASH_SIZE;
659
Steven Rostedtcafb1682009-03-24 20:50:39 -0400660 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
Steven Rostedt493762f2009-03-23 17:12:36 -0400661
Steven Rostedtcafb1682009-03-24 20:50:39 -0400662 if (!stat->hash)
Steven Rostedt493762f2009-03-23 17:12:36 -0400663 return -ENOMEM;
664
Steven Rostedtcafb1682009-03-24 20:50:39 -0400665 if (!ftrace_profile_bits) {
666 size--;
Steven Rostedt493762f2009-03-23 17:12:36 -0400667
Steven Rostedtcafb1682009-03-24 20:50:39 -0400668 for (; size; size >>= 1)
669 ftrace_profile_bits++;
670 }
Steven Rostedt493762f2009-03-23 17:12:36 -0400671
Steven Rostedt318e0a72009-03-25 20:06:34 -0400672 /* Preallocate the function profiling pages */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400673 if (ftrace_profile_pages_init(stat) < 0) {
674 kfree(stat->hash);
675 stat->hash = NULL;
Steven Rostedt493762f2009-03-23 17:12:36 -0400676 return -ENOMEM;
677 }
678
679 return 0;
680}
681
Steven Rostedtcafb1682009-03-24 20:50:39 -0400682static int ftrace_profile_init(void)
683{
684 int cpu;
685 int ret = 0;
686
687 for_each_online_cpu(cpu) {
688 ret = ftrace_profile_init_cpu(cpu);
689 if (ret)
690 break;
691 }
692
693 return ret;
694}
695
Steven Rostedt493762f2009-03-23 17:12:36 -0400696/* interrupts must be disabled */
Steven Rostedtcafb1682009-03-24 20:50:39 -0400697static struct ftrace_profile *
698ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
Steven Rostedt493762f2009-03-23 17:12:36 -0400699{
700 struct ftrace_profile *rec;
701 struct hlist_head *hhd;
702 struct hlist_node *n;
703 unsigned long key;
704
705 key = hash_long(ip, ftrace_profile_bits);
Steven Rostedtcafb1682009-03-24 20:50:39 -0400706 hhd = &stat->hash[key];
Steven Rostedt493762f2009-03-23 17:12:36 -0400707
708 if (hlist_empty(hhd))
709 return NULL;
710
711 hlist_for_each_entry_rcu(rec, n, hhd, node) {
712 if (rec->ip == ip)
713 return rec;
714 }
715
716 return NULL;
717}
718
Steven Rostedtcafb1682009-03-24 20:50:39 -0400719static void ftrace_add_profile(struct ftrace_profile_stat *stat,
720 struct ftrace_profile *rec)
Steven Rostedt493762f2009-03-23 17:12:36 -0400721{
722 unsigned long key;
723
724 key = hash_long(rec->ip, ftrace_profile_bits);
Steven Rostedtcafb1682009-03-24 20:50:39 -0400725 hlist_add_head_rcu(&rec->node, &stat->hash[key]);
Steven Rostedt493762f2009-03-23 17:12:36 -0400726}
727
/*
 * The memory is already allocated, this simply finds a new record to use.
 * Returns the record claimed for @ip (also inserted into @stat's hash),
 * or NULL if we recursed, ran out of pre-allocated pages, or lost a
 * race and bailed out before claiming one.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	/* current page is full; advance, or give up if none are left */
	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
763
/*
 * ftrace callback that counts one hit for @ip on the current CPU.
 * Finds (or lazily allocates) the profile record and bumps its counter.
 * Runs with interrupts disabled for the per-cpu hash access.
 */
static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	/* re-check enabled under irq-off: it may have been cleared */
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
792
Steven Rostedt0706f1c2009-03-23 23:12:58 -0400793#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Graph-tracer entry hook: count the hit; return 1 to trace this call. */
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}
799
/*
 * Graph-tracer return hook: accumulate the function's run time into its
 * profile record. When TRACE_ITER_GRAPH_TIME is off, child call times
 * (tracked in ret_stack[].subtime) are subtracted so only time spent in
 * the function itself is recorded.
 */
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		/* time_squared lets the stat output compute a deviation */
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
842
/* Graph-tracer flavor: profile via entry/return hooks. */
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}
848
/* Tear down the graph-tracer based profiler. */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
853#else
/* Without the graph tracer, hook plain function entry instead. */
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
};
857
/* Function-tracer flavor: profile via the plain entry callback. */
static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}
862
/* Tear down the function-tracer based profiler. */
static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
867#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
868
/*
 * Write handler for "function_profile_enabled": "1" turns the profiler
 * on (allocating per-cpu state and registering the callbacks), "0"
 * turns it off. Serialized by ftrace_profile_lock. Returns @cnt on
 * success or a negative error code.
 */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
913
914static ssize_t
915ftrace_profile_read(struct file *filp, char __user *ubuf,
916 size_t cnt, loff_t *ppos)
917{
Steven Rostedtfb9fb012009-03-25 13:26:41 -0400918 char buf[64]; /* big enough to hold a number */
Steven Rostedt493762f2009-03-23 17:12:36 -0400919 int r;
920
921 r = sprintf(buf, "%u\n", ftrace_profile_enabled);
922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
923}
924
/* debugfs file_operations for "function_profile_enabled" */
static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
931
/*
 * Template used to initialize the real per-cpu stat files; each cpu's
 * copy gets its own .name in ftrace_profile_debugfs().
 */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
941
/*
 * Boot-time setup: register one "function<cpu>" stat file per possible
 * cpu and create the "function_profile_enabled" control file under
 * @d_tracer. Failures are reported with WARN and abort the remaining
 * setup; already-created entries are deliberately left in place.
 */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}
984
985#else /* CONFIG_FUNCTION_PROFILER */
/* Profiler disabled at build time: nothing to create. */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
989#endif /* CONFIG_FUNCTION_PROFILER */
990
Ingo Molnar73d3fd92009-02-17 11:48:18 +0100991static struct pid * const ftrace_swapper_pid = &init_struct_pid;
992
Steven Rostedt3d083392008-05-12 21:20:42 +0200993#ifdef CONFIG_DYNAMIC_FTRACE
Ingo Molnar73d3fd92009-02-17 11:48:18 +0100994
Steven Rostedt99ecdc42008-08-15 21:40:05 -0400995#ifndef CONFIG_FTRACE_MCOUNT_RECORD
Steven Rostedtcb7be3b2008-10-23 09:33:05 -0400996# error Dynamic ftrace depends on MCOUNT_RECORD
Steven Rostedt99ecdc42008-08-15 21:40:05 -0400997#endif
998
Steven Rostedt8fc0c702009-02-16 15:28:00 -0500999static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1000
/* A probe attached to a single traced function address. */
struct ftrace_func_probe {
	struct hlist_node	node;	/* entry in ftrace_func_hash */
	struct ftrace_probe_ops	*ops;	/* callbacks for this probe */
	unsigned long		flags;
	unsigned long		ip;	/* function address probed */
	void			*data;	/* opaque data passed to ops */
	struct rcu_head		rcu;	/* deferred free */
};
1009
/* One function address stored in an ftrace_hash bucket. */
struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};
1014
/* Hash set of function addresses (filter or notrace set of an ops). */
struct ftrace_hash {
	unsigned long		size_bits;	/* log2 of bucket count */
	struct hlist_head	*buckets;
	unsigned long		count;		/* number of entries */
	struct rcu_head		rcu;		/* for deferred freeing */
};
1021
/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
/* shared sentinel for "no filter/notrace entries" */
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
Steven Rostedt5072c592008-05-12 21:20:43 +02001033
/* The ops backing the global (set_ftrace_filter/notrace) hashes. */
static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
	.notrace_hash		= EMPTY_HASH,
	.filter_hash		= EMPTY_HASH,
};
1039
Steven Rostedt41c52c02008-05-22 11:46:33 -04001040static DEFINE_MUTEX(ftrace_regex_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02001041
/* A page-sized chunk of dyn_ftrace records, chained in a list. */
struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;	/* array of records */
	int			index;		/* records in use */
	int			size;		/* record capacity */
};
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001048
Steven Rostedt85ae32a2011-12-16 16:30:31 -05001049static struct ftrace_page *ftrace_new_pgs;
1050
Steven Rostedta7900872011-12-16 16:23:44 -05001051#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1052#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
Steven Rostedt3c1720f2008-05-12 21:20:43 +02001053
1054/* estimate from running different kernels */
1055#define NR_TO_INIT 10000
1056
1057static struct ftrace_page *ftrace_pages_start;
1058static struct ftrace_page *ftrace_pages;
1059
Steven Rostedt06a51d92011-12-19 19:07:36 -05001060static bool ftrace_hash_empty(struct ftrace_hash *hash)
1061{
1062 return !hash || !hash->count;
1063}
1064
/*
 * Look up @ip in @hash. Returns the matching entry or NULL.
 * Safe against concurrent readers via the RCU list walk; a hash with
 * size_bits == 0 degenerates to a single bucket.
 */
static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}
1089
/*
 * Link an already-allocated @entry into @hash and bump the count.
 * Caller owns @entry; no allocation is done here.
 */
static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}
1105
/*
 * Allocate a new entry for @ip and add it to @hash.
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}
1119
/* Unlink @entry from @hash and free it (cf. remove_hash_entry). */
static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}
1128
/*
 * Unlink @entry from @hash WITHOUT freeing it — used when the entry is
 * about to be re-linked into another hash (see ftrace_hash_move()).
 */
static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}
1136
/* Free every entry in @hash, leaving the bucket array itself intact. */
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tp, *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	/* every free_hash_entry() decremented count; it must now be 0 */
	FTRACE_WARN_ON(hash->count);
}
1155
/*
 * Free @hash, its buckets and all entries. NULL and the shared
 * EMPTY_HASH sentinel are silently ignored.
 */
static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}
1164
/* RCU callback: recover the hash from its embedded rcu_head and free it. */
static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}
1172
/*
 * Schedule @hash to be freed after a sched-RCU grace period, so
 * lockless readers that may still hold a reference stay safe.
 */
static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}
1179
/* Release both filter hashes of @ops (exported helper for ops owners). */
void ftrace_free_filter(struct ftrace_ops *ops)
{
	free_ftrace_hash(ops->filter_hash);
	free_ftrace_hash(ops->notrace_hash);
}
1185
/*
 * Allocate an empty ftrace_hash with 2^@size_bits buckets.
 * Returns NULL on allocation failure.
 */
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}
1207
/*
 * Allocate a new hash with 2^@size_bits buckets and copy every entry
 * of @hash into it. Returns the new hash, or NULL if any allocation
 * fails (partially built copies are freed).
 */
static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_node *tp;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}
1243
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001244static void
1245ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1246static void
1247ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1248
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001249static int
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001250ftrace_hash_move(struct ftrace_ops *ops, int enable,
1251 struct ftrace_hash **dst, struct ftrace_hash *src)
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001252{
1253 struct ftrace_func_entry *entry;
1254 struct hlist_node *tp, *tn;
1255 struct hlist_head *hhd;
Steven Rostedt07fd5512011-05-05 18:03:47 -04001256 struct ftrace_hash *old_hash;
1257 struct ftrace_hash *new_hash;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001258 unsigned long key;
1259 int size = src->count;
1260 int bits = 0;
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001261 int ret;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001262 int i;
1263
1264 /*
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001265 * Remove the current set, update the hash and add
1266 * them back.
1267 */
1268 ftrace_hash_rec_disable(ops, enable);
1269
1270 /*
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001271 * If the new source is empty, just free dst and assign it
1272 * the empty_hash.
1273 */
1274 if (!src->count) {
Steven Rostedt07fd5512011-05-05 18:03:47 -04001275 free_ftrace_hash_rcu(*dst);
1276 rcu_assign_pointer(*dst, EMPTY_HASH);
Steven Rostedtd4d34b92011-11-04 20:32:39 -04001277 /* still need to update the function records */
1278 ret = 0;
1279 goto out;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001280 }
1281
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001282 /*
1283 * Make the hash size about 1/2 the # found
1284 */
1285 for (size /= 2; size; size >>= 1)
1286 bits++;
1287
1288 /* Don't allocate too much */
1289 if (bits > FTRACE_HASH_MAX_BITS)
1290 bits = FTRACE_HASH_MAX_BITS;
1291
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001292 ret = -ENOMEM;
Steven Rostedt07fd5512011-05-05 18:03:47 -04001293 new_hash = alloc_ftrace_hash(bits);
1294 if (!new_hash)
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001295 goto out;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001296
1297 size = 1 << src->size_bits;
1298 for (i = 0; i < size; i++) {
1299 hhd = &src->buckets[i];
1300 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1301 if (bits > 0)
1302 key = hash_long(entry->ip, bits);
1303 else
1304 key = 0;
1305 remove_hash_entry(src, entry);
Steven Rostedt07fd5512011-05-05 18:03:47 -04001306 __add_hash_entry(new_hash, entry);
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001307 }
1308 }
1309
Steven Rostedt07fd5512011-05-05 18:03:47 -04001310 old_hash = *dst;
1311 rcu_assign_pointer(*dst, new_hash);
1312 free_ftrace_hash_rcu(old_hash);
1313
Steven Rostedt41fb61c2011-07-13 15:03:44 -04001314 ret = 0;
1315 out:
1316 /*
1317 * Enable regardless of ret:
1318 * On success, we enable the new hash.
1319 * On failure, we re-enable the original hash.
1320 */
1321 ftrace_hash_rec_enable(ops, enable);
1322
1323 return ret;
Steven Rostedt33dc9b12011-05-02 17:34:47 -04001324}
1325
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 * AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 *
 * Returns 1 if @ip should be traced by @ops, 0 otherwise.
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	struct ftrace_hash *filter_hash;
	struct ftrace_hash *notrace_hash;
	int ret;

	filter_hash = rcu_dereference_raw(ops->filter_hash);
	notrace_hash = rcu_dereference_raw(ops->notrace_hash);

	if ((ftrace_hash_empty(filter_hash) ||
	     ftrace_lookup_ip(filter_hash, ip)) &&
	    (ftrace_hash_empty(notrace_hash) ||
	     !ftrace_lookup_ip(notrace_hash, ip)))
		ret = 1;
	else
		ret = 0;

	return ret;
}
1358
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto. Iterates @rec over every dyn_ftrace record in
 * every ftrace_page; close the iteration with while_for_each_ftrace_rec().
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
Abhishek Sagarecea6562008-06-21 23:47:53 +05301372
Steven Rostedt5855fea2011-12-16 19:27:42 -05001373
1374static int ftrace_cmp_recs(const void *a, const void *b)
1375{
Steven Rostedta650e022012-04-25 13:48:13 -04001376 const struct dyn_ftrace *key = a;
1377 const struct dyn_ftrace *rec = b;
Steven Rostedt5855fea2011-12-16 19:27:42 -05001378
Steven Rostedta650e022012-04-25 13:48:13 -04001379 if (key->flags < rec->ip)
Steven Rostedt5855fea2011-12-16 19:27:42 -05001380 return -1;
Steven Rostedta650e022012-04-25 13:48:13 -04001381 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1382 return 1;
1383 return 0;
1384}
1385
/*
 * Search all record pages for an mcount call site overlapping the
 * inclusive byte range [@start, @end]. Returns the record's ip, or 0
 * if no traced location falls in the range. Pages whose first/last
 * records lie entirely outside the range are skipped before the
 * binary search.
 */
static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}
1408
/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not. Returns 0 when @ip is
 * not a traced location.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}
Steven Rostedtc88fd862011-08-16 09:53:39 -04001422
/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(void *start, void *end)
{
	unsigned long found;

	found = ftrace_location_range((unsigned long)start,
				      (unsigned long)end);

	return found ? 1 : 0;
}
1442
/*
 * Walk every dyn_ftrace record and adjust its reference count for
 * @ops' filter (@filter_hash != 0) or notrace (@filter_hash == 0)
 * hash. @inc increments the refs (ops now wants these functions),
 * otherwise they are decremented. Only acts if @ops is registered.
 */
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->filter_hash;
		other_hash = ops->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->notrace_hash;
		other_hash = ops->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * filter_hash: count the record if it is filtered
			 * on and not excluded by the notrace hash.
			 * notrace_hash: count the record if it is in the
			 * notrace hash and would otherwise be traced
			 * (in the filter hash, or the filter is empty).
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			/* ref count lives in the low flag bits; catch overflow */
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
				return;
		} else {
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
				return;
			rec->flags--;
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}
1529
/* Drop @ops' references on the records covered by its hash. */
static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}
1535
/* Take @ops' references on the records covered by its hash. */
static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}
1541
Steven Rostedt05736a42008-09-22 14:55:47 -07001542static void print_ip_ins(const char *fmt, unsigned char *p)
1543{
1544 int i;
1545
1546 printk(KERN_CONT "%s", fmt);
1547
1548 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1549 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1550}
1551
/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @ip: The address that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		/* show what was actually found at the failing address */
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
1590
/*
 * Decide what (if anything) must be done to @rec's call site and,
 * when @update is set, record the new enabled state in rec->flags.
 * Returns FTRACE_UPDATE_IGNORE when nothing changes,
 * FTRACE_UPDATE_MAKE_CALL to patch in the tracer call, or
 * FTRACE_UPDATE_MAKE_NOP to patch the site back to a nop.
 */
static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it's disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && (rec->flags & ~FTRACE_FL_MASK))
		flag = FTRACE_FL_ENABLED;

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		if (update)
			rec->flags |= FTRACE_FL_ENABLED;
		return FTRACE_UPDATE_MAKE_CALL;
	}

	if (update)
		rec->flags &= ~FTRACE_FL_ENABLED;

	return FTRACE_UPDATE_MAKE_NOP;
}
1624
/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
 */
int ftrace_update_record(struct dyn_ftrace *rec, int enable)
{
	/* update == 1: commit the new state into rec->flags */
	return ftrace_check_record(rec, enable, 1);
}
1637
/**
 * ftrace_test_record, check if the record has been enabled or not
 * @rec: the record to test
 * @enable: set to 1 to check if enabled, 0 if it is disabled
 *
 * The arch code may need to test if a record is already set to
 * tracing to determine how to modify the function code that it
 * represents.
 */
int ftrace_test_record(struct dyn_ftrace *rec, int enable)
{
	/* update == 0: query only, rec->flags is left untouched */
	return ftrace_check_record(rec, enable, 0);
}
1651
/*
 * Apply the state decided by ftrace_update_record() to @rec's call site:
 * patch in a call to FTRACE_ADDR, patch in a NOP, or do nothing.
 * Returns 0 on success or the arch code's negative error.
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	int ret;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;

	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);

	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return -1; /* unknown ftrace bug */
}
1675
/*
 * Walk every dyn_ftrace record and enable/disable its call site via
 * __ftrace_replace_code().  On the first failure the problem is reported
 * through ftrace_bug() and the walk stops, leaving later records untouched.
 * Weak so an arch can provide its own implementation.
 */
void __weak ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	if (unlikely(ftrace_disabled))
		return;

	do_for_each_ftrace_rec(pg, rec) {
		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}
1694
/* Cursor over the dyn_ftrace record pages for the ftrace_rec_iter_* API. */
struct ftrace_rec_iter {
	struct ftrace_page	*pg;	/* current page of records */
	int			index;	/* current slot within pg->records */
};
1699
1700/**
1701 * ftrace_rec_iter_start, start up iterating over traced functions
1702 *
1703 * Returns an iterator handle that is used to iterate over all
1704 * the records that represent address locations where functions
1705 * are traced.
1706 *
1707 * May return NULL if no records are available.
1708 */
1709struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1710{
1711 /*
1712 * We only use a single iterator.
1713 * Protected by the ftrace_lock mutex.
1714 */
1715 static struct ftrace_rec_iter ftrace_rec_iter;
1716 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1717
1718 iter->pg = ftrace_pages_start;
1719 iter->index = 0;
1720
1721 /* Could have empty pages */
1722 while (iter->pg && !iter->pg->index)
1723 iter->pg = iter->pg->next;
1724
1725 if (!iter->pg)
1726 return NULL;
1727
1728 return iter;
1729}
1730
1731/**
1732 * ftrace_rec_iter_next, get the next record to process.
1733 * @iter: The handle to the iterator.
1734 *
1735 * Returns the next iterator after the given iterator @iter.
1736 */
1737struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1738{
1739 iter->index++;
1740
1741 if (iter->index >= iter->pg->index) {
1742 iter->pg = iter->pg->next;
1743 iter->index = 0;
1744
1745 /* Could have empty pages */
1746 while (iter->pg && !iter->pg->index)
1747 iter->pg = iter->pg->next;
1748 }
1749
1750 if (!iter->pg)
1751 return NULL;
1752
1753 return iter;
1754}
1755
1756/**
1757 * ftrace_rec_iter_record, get the record at the iterator location
1758 * @iter: The current iterator location
1759 *
1760 * Returns the record that the current @iter is at.
1761 */
1762struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1763{
1764 return &iter->pg->records[iter->index];
1765}
1766
/*
 * Convert @rec's mcount call site into a NOP.  Returns 1 on success,
 * 0 on failure (after reporting through ftrace_bug()) or when ftrace
 * is globally disabled.
 */
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	if (unlikely(ftrace_disabled))
		return 0;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		return 0;
	}
	return 1;
}
1785
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 * The default stub does nothing and reports success.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}
1794
/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 * The default stub does nothing and reports success.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
1803
/*
 * Perform every code modification requested by the @command bitmask:
 * patch call sites on/off, swap the traced function pointer, and
 * enable/disable the function-graph return caller.
 */
void ftrace_modify_all_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}
1819
/*
 * stop_machine() callback: @data points to the command bitmask.
 * Runs ftrace_modify_all_code() while all other CPUs are quiesced.
 */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	ftrace_modify_all_code(*command);

	return 0;
}
1828
/**
 * ftrace_run_stop_machine, go back to the stop machine method
 * @command: The command to tell ftrace what to do
 *
 * If an arch needs to fall back to the stop machine method, the
 * it can call this function.
 */
void ftrace_run_stop_machine(int command)
{
	/* NULL cpumask: run the callback on one CPU, holding all others */
	stop_machine(__ftrace_modify_code, &command, NULL);
}
1840
/**
 * arch_ftrace_update_code, modify the code to trace or not trace
 * @command: The command that needs to be done
 *
 * Archs can override this function if it does not need to
 * run stop_machine() to modify code.
 */
void __weak arch_ftrace_update_code(int command)
{
	/* Default: the safe but heavyweight stop_machine() path */
	ftrace_run_stop_machine(command);
}
1852
/*
 * Run a full code-modification cycle for @command: arch prepare hook,
 * suppress tracing callbacks while patching, do the modification,
 * then the arch post-process hook.  Bails out if prepare fails.
 */
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;
	/*
	 * Do not call function tracer while we update the code.
	 * We are in stop machine.
	 */
	function_trace_stop++;

	/*
	 * By default we use stop_machine() to modify the code.
	 * But archs can do what ever they want as long as it
	 * is safe. The stop_machine() is the safest, but also
	 * produces the most overhead.
	 */
	arch_ftrace_update_code(command);

	function_trace_stop--;

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
1880
/* Callback installed last FTRACE_UPDATE_TRACE_FUNC; compared against
 * ftrace_trace_function to detect when it needs re-installing. */
static ftrace_func_t saved_ftrace_func;
/* Nesting count of ftrace_startup() minus ftrace_shutdown() calls. */
static int ftrace_start_up;
/* Nesting count of enabled ops that share the global filter hashes. */
static int global_start_up;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05001884
/*
 * Finish a startup: add FTRACE_UPDATE_TRACE_FUNC to @command if the
 * trace callback changed, then run the code modification -- unless
 * there is nothing to do or ftrace is disabled via sysctl.
 */
static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
Steven Rostedtd61f82d2008-05-12 21:20:43 +02001897
/*
 * Register @ops as active: bump the start counters, account its filter
 * hash into the record ref counts, and patch the call sites.
 * Returns 0 on success, -ENODEV if ftrace has been disabled.
 */
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	bool hash_enable = true;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	ops->flags |= FTRACE_OPS_FL_ENABLED;
	if (hash_enable)
		ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	return 0;
}
1925
/*
 * Mirror of ftrace_startup(): drop the start counters, remove @ops'
 * filter-hash contribution from the record ref counts, and re-patch
 * the call sites accordingly.
 */
static void ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	bool hash_disable = true;

	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	if (hash_disable)
		ftrace_hash_rec_disable(ops, 1);

	/* Keep ENABLED set while other global users remain */
	if (ops != &global_ops || !global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	command |= FTRACE_UPDATE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
1970
/*
 * Called when the ftrace_enabled sysctl is switched on: re-patch all
 * call sites if any ops are registered (ftrace_start_up != 0).
 */
static void ftrace_startup_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
}
1982
/*
 * Called when the ftrace_enabled sysctl is switched off: NOP out all
 * call sites while leaving registered ops in place.
 */
static void ftrace_shutdown_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}
1992
static cycle_t ftrace_update_time;	/* time taken by the last record-conversion pass */
static unsigned long ftrace_update_cnt;	/* records converted in the last pass */
unsigned long ftrace_update_tot_cnt;	/* total records converted since boot */
1996
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04001997static int ops_traces_mod(struct ftrace_ops *ops)
1998{
1999 struct ftrace_hash *hash;
2000
2001 hash = ops->filter_hash;
Steven Rostedt06a51d92011-12-19 19:07:36 -05002002 return ftrace_hash_empty(hash);
Steven Rostedtf7bc8b62011-07-14 23:02:27 -04002003}
2004
/*
 * Convert every record on the ftrace_new_pgs list from its mcount call
 * to a NOP, seeding each record's ref count from the number of enabled
 * ops that trace everything (relevant when @mod is a new module).
 * Returns 0 on success, -1 if ftrace was disabled mid-way.
 */
static int ftrace_update_code(struct module *mod)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long ref = 0;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
			    ops_traces_mod(ops))
				ref++;
		}
	}

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	for (pg = ftrace_new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			p->flags = ref;

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			ftrace_update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Making first the NOP
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && ref) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p->ip);
			}
		}
	}

	ftrace_new_pgs = NULL;

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
2077
/*
 * Allocate the record array for one ftrace_page, sized for up to @count
 * entries.  Retries with smaller orders on allocation failure.  Returns
 * the number of @count entries this page will hold (may be fewer than
 * its capacity), or a negative errno.
 */
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	/* Capacity actually obtained; remembered for later freeing */
	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}
2114
2115static struct ftrace_page *
2116ftrace_allocate_pages(unsigned long num_to_init)
2117{
2118 struct ftrace_page *start_pg;
2119 struct ftrace_page *pg;
2120 int order;
2121 int cnt;
2122
2123 if (!num_to_init)
2124 return 0;
2125
2126 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2127 if (!pg)
2128 return NULL;
2129
2130 /*
2131 * Try to allocate as much as possible in one continues
2132 * location that fills in all of the space. We want to
2133 * waste as little space as possible.
2134 */
2135 for (;;) {
2136 cnt = ftrace_allocate_records(pg, num_to_init);
2137 if (cnt < 0)
2138 goto free_pages;
2139
2140 num_to_init -= cnt;
2141 if (!num_to_init)
2142 break;
2143
2144 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2145 if (!pg->next)
2146 goto free_pages;
2147
2148 pg = pg->next;
2149 }
2150
2151 return start_pg;
2152
2153 free_pages:
2154 while (start_pg) {
2155 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2156 free_pages((unsigned long)pg->records, order);
2157 start_pg = pg->next;
2158 kfree(pg);
2159 pg = start_pg;
2160 }
2161 pr_info("ftrace: FAILED to allocate memory for functions\n");
2162 return NULL;
2163}
2164
/*
 * Boot-time sanity/report helper: logs how many entries/pages will be
 * needed for @num_to_init records.  Returns -1 if there are no
 * functions to trace, 0 otherwise.
 */
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	int cnt;

	if (!num_to_init) {
		pr_info("ftrace: No functions to be traced?\n");
		return -1;
	}

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	return 0;
}
2180
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/* seq_file cursor shared by the ftrace debugfs readers (avail, enabled,
 * filter, notrace).  Iterates the dyn_ftrace pages and, afterwards, the
 * function-probe hash. */
struct ftrace_iterator {
	loff_t pos;				/* current seq position */
	loff_t func_pos;			/* position where function records ended */
	struct ftrace_page *pg;			/* current record page */
	struct dyn_ftrace *func;		/* record to show at this position */
	struct ftrace_func_probe *probe;	/* probe entry when in hash mode */
	struct trace_parser parser;		/* parser state for writes */
	struct ftrace_hash *hash;		/* hash being edited (filter/notrace) */
	struct ftrace_ops *ops;			/* ops whose hashes are viewed */
	int hidx;				/* index into ftrace_func_hash */
	int idx;				/* index into pg->records */
	unsigned flags;				/* FTRACE_ITER_* mode bits */
};
2196
/*
 * Advance the iterator to the next entry of the function-probe hash
 * (ftrace_func_hash), walking bucket chains and skipping empty buckets.
 * Returns the iterator, or NULL when the hash is exhausted.
 */
static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			/* End of this bucket's chain; try the next bucket */
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}
2238
/*
 * Enter the probe-hash phase of the iteration once the function records
 * are exhausted.  Positions the hash walk at (*pos - func_pos) entries in.
 * Returns NULL if this reader does not show probes or nothing is there.
 */
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	if (!(iter->flags & FTRACE_ITER_DO_HASH))
		return NULL;

	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}
2265
/*
 * seq_file show handler for a probe-hash entry: delegates to the probe's
 * own print callback when present, otherwise prints "ip:func[:data]".
 */
static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}
2286
/*
 * seq_file next handler: advance to the next dyn_ftrace record that
 * matches the iterator's mode (filter/notrace/enabled), falling through
 * to the probe hash via t_hash_start() when the records run out.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	/* Already past the records: keep walking the probe hash */
	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	/* "all functions enabled" shows a single line; go to probes */
	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/* Skip records that don't match the view's criteria */
		if (((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & ~FTRACE_FL_MASK))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return iter;
}
2336
Steven Rostedt98c4fd02010-09-10 11:47:43 -04002337static void reset_iter_read(struct ftrace_iterator *iter)
2338{
2339 iter->pos = 0;
2340 iter->func_pos = 0;
2341 iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
Steven Rostedt5072c592008-05-12 21:20:43 +02002342}
2343
/*
 * seq_file start handler: takes ftrace_lock (released in t_stop()),
 * handles rewinds, the "no filter set" shortcut, and re-walks the record
 * pages up to *pos since the pages may have changed while unlocked.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = iter->ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER &&
	    ftrace_hash_empty(ops->filter_hash)) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p)
		return t_hash_start(m, pos);

	return iter;
}
2398
/* seq_file stop handler: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
2403
/*
 * seq_file show handler: prints the probe entry, the "all functions"
 * banner, or the current record's symbol (with its ref count when the
 * enabled view is active).
 */
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED)
		seq_printf(m, " (%ld)",
			   rec->flags & ~FTRACE_FL_MASK);
	seq_printf(m, "\n");

	return 0;
}
2430
/* seq_file operations shared by the available/enabled/filter files. */
static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
2437
Ingo Molnare309b412008-05-12 21:20:51 +02002438static int
Steven Rostedt5072c592008-05-12 21:20:43 +02002439ftrace_avail_open(struct inode *inode, struct file *file)
2440{
2441 struct ftrace_iterator *iter;
Steven Rostedt5072c592008-05-12 21:20:43 +02002442
Steven Rostedt4eebcc82008-05-12 21:20:48 +02002443 if (unlikely(ftrace_disabled))
2444 return -ENODEV;
2445
Jiri Olsa50e18b92012-04-25 10:23:39 +02002446 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2447 if (iter) {
2448 iter->pg = ftrace_pages_start;
2449 iter->ops = &global_ops;
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002450 }
Steven Rostedt5072c592008-05-12 21:20:43 +02002451
Jiri Olsa50e18b92012-04-25 10:23:39 +02002452 return iter ? 0 : -ENOMEM;
Steven Rostedt5072c592008-05-12 21:20:43 +02002453}
2454
Steven Rostedt647bcd02011-05-03 14:39:21 -04002455static int
2456ftrace_enabled_open(struct inode *inode, struct file *file)
2457{
2458 struct ftrace_iterator *iter;
Steven Rostedt647bcd02011-05-03 14:39:21 -04002459
2460 if (unlikely(ftrace_disabled))
2461 return -ENODEV;
2462
Jiri Olsa50e18b92012-04-25 10:23:39 +02002463 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2464 if (iter) {
2465 iter->pg = ftrace_pages_start;
2466 iter->flags = FTRACE_ITER_ENABLED;
2467 iter->ops = &global_ops;
Steven Rostedt647bcd02011-05-03 14:39:21 -04002468 }
2469
Jiri Olsa50e18b92012-04-25 10:23:39 +02002470 return iter ? 0 : -ENOMEM;
Steven Rostedt647bcd02011-05-03 14:39:21 -04002471}
2472
/* Empty @hash (remove every entry) under ftrace_lock. */
static void ftrace_filter_reset(struct ftrace_hash *hash)
{
	mutex_lock(&ftrace_lock);
	ftrace_hash_clear(hash);
	mutex_unlock(&ftrace_lock);
}
2479
/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * ftrace_regex_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 *
 * Returns 0 on success, -ENODEV when ftrace is disabled, or -ENOMEM.
 */
int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	/* Pick the hash this file instance will operate on. */
	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->notrace_hash;
	else
		hash = ops->filter_hash;

	iter->ops = ops;
	iter->flags = flag;

	/*
	 * Writers get a private copy of the hash; it is swapped back in
	 * on release. The copy is made under ftrace_lock so the source
	 * hash cannot change underneath us.
	 */
	if (file->f_mode & FMODE_WRITE) {
		mutex_lock(&ftrace_lock);
		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
		mutex_unlock(&ftrace_lock);

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			return -ENOMEM;
		}
	}

	mutex_lock(&ftrace_regex_lock);

	/* O_TRUNC on a writable open empties the (copied) hash first. */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(iter->hash);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed: release everything acquired above. */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
2561
/* Open handler for set_ftrace_filter: filter hash plus hash commands. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops,
			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
			inode, file);
}
2569
/* Open handler for set_ftrace_notrace: operates on the notrace hash. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}
2576
Steven Rostedtfc13cb02011-12-19 14:41:25 -05002577loff_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04002578ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
Steven Rostedt5072c592008-05-12 21:20:43 +02002579{
2580 loff_t ret;
2581
2582 if (file->f_mode & FMODE_READ)
2583 ret = seq_lseek(file, offset, origin);
2584 else
2585 file->f_pos = ret = 1;
2586
2587 return ret;
2588}
2589
Steven Rostedt64e7c442009-02-13 17:08:48 -05002590static int ftrace_match(char *str, char *regex, int len, int type)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002591{
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002592 int matched = 0;
Li Zefan751e9982010-01-14 10:53:02 +08002593 int slen;
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002594
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002595 switch (type) {
2596 case MATCH_FULL:
2597 if (strcmp(str, regex) == 0)
2598 matched = 1;
2599 break;
2600 case MATCH_FRONT_ONLY:
2601 if (strncmp(str, regex, len) == 0)
2602 matched = 1;
2603 break;
2604 case MATCH_MIDDLE_ONLY:
2605 if (strstr(str, regex))
2606 matched = 1;
2607 break;
2608 case MATCH_END_ONLY:
Li Zefan751e9982010-01-14 10:53:02 +08002609 slen = strlen(str);
2610 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
Steven Rostedt9f4801e2009-02-13 15:56:43 -05002611 matched = 1;
2612 break;
2613 }
2614
2615 return matched;
2616}
2617
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002618static int
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002619enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
Steven Rostedt996e87b2011-04-26 16:11:03 -04002620{
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002621 struct ftrace_func_entry *entry;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002622 int ret = 0;
2623
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002624 entry = ftrace_lookup_ip(hash, rec->ip);
2625 if (not) {
2626 /* Do nothing if it doesn't exist */
2627 if (!entry)
2628 return 0;
2629
Steven Rostedt33dc9b12011-05-02 17:34:47 -04002630 free_hash_entry(hash, entry);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002631 } else {
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002632 /* Do nothing if it exists */
2633 if (entry)
2634 return 0;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002635
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002636 ret = add_hash_entry(hash, rec->ip);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002637 }
2638 return ret;
Steven Rostedt996e87b2011-04-26 16:11:03 -04002639}
2640
/*
 * Decide whether @rec matches the parsed glob (@regex/@len/@type),
 * optionally restricted to module @mod. The record's ip is resolved
 * to a symbol name (and owning module) via kallsyms.
 * Returns 1 on match, 0 otherwise.
 */
static int
ftrace_match_record(struct dyn_ftrace *rec, char *mod,
		    char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod) {
		/* module lookup requires matching the module */
		if (!modname || strcmp(modname, mod))
			return 0;

		/* blank search means to match all funcs in the mod */
		if (!len)
			return 1;
	}

	return ftrace_match(str, regex, len, type);
}
2662
/*
 * Apply the glob in @buff (length @len, optionally scoped to module
 * @mod) to every dyn_ftrace record, entering or removing matches in
 * @hash via enter_record(). An empty @buff keeps type MATCH_FULL with
 * a zero-length search, which matches everything in @mod.
 *
 * Returns 1 if at least one record matched, 0 if none did (or ftrace
 * is disabled), or a negative error from enter_record().
 */
static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	int found = 0;
	int ret;

	if (len) {
		/* May set @not if the glob starts with '!'. */
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}

	/* ftrace_lock protects the record pages walked below. */
	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (ftrace_match_record(rec, mod, search, search_len, type)) {
			ret = enter_record(hash, rec, not);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}
2700
/* Convenience wrapper: match @buff against all functions, no module. */
static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL, 0);
}
2706
/*
 * Handle the function part of a ":mod:<name>" command. @buff is the
 * glob to apply within module @mod; "" and "*" select every function
 * in the module, while "!" / "!*" exclude the whole module.
 * Returns the result of match_records().
 */
static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
	int negate = 0;

	/* "" and "*" both mean every function in the module. */
	if (!strcmp(buff, "*"))
		buff[0] = '\0';

	/* "!" or "!*" flips the sense: drop the whole module. */
	if (!strcmp(buff, "!") || !strcmp(buff, "!*")) {
		buff[0] = '\0';
		negate = 1;
	}

	return match_records(hash, buff, strlen(buff), mod, negate);
}
2724
Steven Rostedtf6180772009-02-14 00:40:25 -05002725/*
2726 * We register the module command as a template to show others how
2727 * to register the a command as well.
2728 */
2729
2730static int
Steven Rostedt43dd61c2011-07-07 11:09:22 -04002731ftrace_mod_callback(struct ftrace_hash *hash,
2732 char *func, char *cmd, char *param, int enable)
Steven Rostedtf6180772009-02-14 00:40:25 -05002733{
2734 char *mod;
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002735 int ret = -EINVAL;
Steven Rostedtf6180772009-02-14 00:40:25 -05002736
2737 /*
2738 * cmd == 'mod' because we only registered this func
2739 * for the 'mod' ftrace_func_command.
2740 * But if you register one func with multiple commands,
2741 * you can tell which command was used by the cmd
2742 * parameter.
2743 */
2744
2745 /* we must have a module name */
2746 if (!param)
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002747 return ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05002748
2749 mod = strsep(&param, ":");
2750 if (!strlen(mod))
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002751 return ret;
Steven Rostedtf6180772009-02-14 00:40:25 -05002752
Steven Rostedt1cf41dd2011-04-29 20:59:51 -04002753 ret = ftrace_match_module_records(hash, func, mod);
Steven Rostedtb448c4e2011-04-29 15:12:32 -04002754 if (!ret)
2755 ret = -EINVAL;
2756 if (ret < 0)
2757 return ret;
2758
2759 return 0;
Steven Rostedtf6180772009-02-14 00:40:25 -05002760}
2761
/* The ":mod:" filter command, handled by ftrace_mod_callback(). */
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};
2766
/* Register the "mod" command at boot. */
static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
2772
/*
 * ftrace handler installed for function probes: look up @ip in
 * ftrace_func_hash and invoke every probe registered for it.
 * Runs in function-trace context, hence the _notrace preemption
 * helpers and no sleeping locks.
 */
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	/* Fast path: nothing registered in this bucket. */
	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}
2800
/* ftrace_ops that funnels probed functions into ftrace_func_hash. */
static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

/* Non-zero once trace_probe_ops has been registered with ftrace. */
static int ftrace_probe_registered;
Steven Rostedt59df055f2009-02-14 15:29:06 -05002807
/*
 * Hook trace_probe_ops into ftrace if it is not yet registered and at
 * least one probe exists in ftrace_func_hash. Called from
 * register_ftrace_function_probe() with ftrace_lock held.
 */
static void __enable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (ftrace_probe_registered)
		return;

	/* Look for any non-empty bucket. */
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	ret = __register_ftrace_function(&trace_probe_ops);
	if (!ret)
		ret = ftrace_startup(&trace_probe_ops, 0);

	/*
	 * NOTE(review): the flag is set even when registration or
	 * startup failed (ret != 0) -- confirm whether a failure here
	 * should be propagated to the caller instead.
	 */
	ftrace_probe_registered = 1;
}
2831
/*
 * Unhook trace_probe_ops from ftrace once the very last probe is gone.
 * If any bucket still holds an entry, the ops stay registered.
 * Called from __unregister_ftrace_function_probe() with ftrace_lock
 * held.
 */
static void __disable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ret = __unregister_ftrace_function(&trace_probe_ops);
	if (!ret)
		ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}
2853
2854
/*
 * RCU callback freeing a probe entry after a grace period, giving the
 * probe's ->free() hook a chance to release its private data first.
 */
static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
2864
2865
/*
 * Attach probe @ops (with @data) to every function matching @glob.
 * One ftrace_func_probe entry is allocated per matching record and
 * inserted into ftrace_func_hash; the probe machinery is then enabled.
 *
 * Returns the number of functions hooked, -EINVAL for a '!' glob,
 * or -ENOMEM if allocation failed before anything was hooked. Note
 * that ftrace_disabled yields a 0 return, not an error.
 */
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
2934
/*
 * Flag bits for __unregister_ftrace_function_probe(): which fields of
 * an installed probe entry must match before it is removed.
 */
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};
2939
/*
 * Remove probe entries from ftrace_func_hash. @glob filters by symbol
 * name (NULL, "" or "*" match everything); @flags selects whether
 * @ops and/or @data must also match. Freeing is deferred via RCU so
 * a concurrent function_trace_probe_call() walk stays safe, and the
 * probe machinery is disabled if nothing is left.
 */
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}
2992
/* Remove probes matching @glob with this exact @ops and @data. */
void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}
3000
/* Remove probes matching @glob with this @ops, regardless of data. */
void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}
3006
/* Remove every probe matching @glob, regardless of ops or data. */
void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
3011
/* Registered filter commands (e.g. "mod") and the mutex guarding the list. */
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);
3014
3015int register_ftrace_command(struct ftrace_func_command *cmd)
3016{
3017 struct ftrace_func_command *p;
3018 int ret = 0;
3019
3020 mutex_lock(&ftrace_cmd_mutex);
3021 list_for_each_entry(p, &ftrace_commands, list) {
3022 if (strcmp(cmd->name, p->name) == 0) {
3023 ret = -EBUSY;
3024 goto out_unlock;
3025 }
3026 }
3027 list_add(&cmd->list, &ftrace_commands);
3028 out_unlock:
3029 mutex_unlock(&ftrace_cmd_mutex);
3030
3031 return ret;
3032}
3033
3034int unregister_ftrace_command(struct ftrace_func_command *cmd)
3035{
3036 struct ftrace_func_command *p, *n;
3037 int ret = -ENODEV;
3038
3039 mutex_lock(&ftrace_cmd_mutex);
3040 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3041 if (strcmp(cmd->name, p->name) == 0) {
3042 ret = 0;
3043 list_del_init(&p->list);
3044 goto out_unlock;
3045 }
3046 }
3047 out_unlock:
3048 mutex_unlock(&ftrace_cmd_mutex);
3049
3050 return ret;
3051}
3052
/*
 * Parse one write to a filter file. Plain "func" globs go straight to
 * ftrace_match_records(); "func:command[:param]" is dispatched to the
 * registered ftrace_func_command of that name. Returns 0 on success,
 * -EINVAL for an empty match or unknown command, or the command's
 * error.
 */
static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	/* No ':' -- the whole buffer is a function glob. */
	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			/* Matching nothing is reported as an error. */
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
3087
/*
 * Common write handler behind set_ftrace_filter/set_ftrace_notrace.
 * Accumulates user input in the iterator's parser and, once a full
 * token has been read, feeds it to ftrace_process_regex() against the
 * iterator's private hash. @enable distinguishes filter (1) from
 * notrace (0) semantics. Returns bytes consumed or a negative error.
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	ret = -ENODEV;
	if (unlikely(ftrace_disabled))
		goto out_unlock;

	/* Readable opens keep the iterator inside the seq_file. */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	/* Only act once a complete (non-continued) token is buffered. */
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret)
			goto out_unlock;
	}

	ret = read;
out_unlock:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
3129
/* Write handler for filter files: enable == 1 selects filter semantics. */
ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}
3136
Steven Rostedtfc13cb02011-12-19 14:41:25 -05003137ssize_t
Steven Rostedt41c52c02008-05-22 11:46:33 -04003138ftrace_notrace_write(struct file *file, const char __user *ubuf,
3139 size_t cnt, loff_t *ppos)
3140{
3141 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3142}
3143
/*
 * Kernel-internal filter update: apply @buf (length @len) to the
 * filter (@enable != 0) or notrace hash of @ops, optionally resetting
 * it first. Works on a private copy of the hash, then swaps it in via
 * ftrace_hash_move() and re-runs the call-site update if the ops are
 * live. Returns 0 on success or a negative error.
 */
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *hash;
	int ret;

	/* All global ops uses the global ops filters */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
		ops = &global_ops;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	if (enable)
		orig_hash = &ops->filter_hash;
	else
		orig_hash = &ops->notrace_hash;

	/* Edit a copy; the live hash is only swapped on success. */
	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(hash);
	if (buf && !ftrace_match_records(hash, buf, len)) {
		ret = -EINVAL;
		goto out_regex_unlock;
	}

	/* ftrace_lock nests inside ftrace_regex_lock here. */
	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
	    && ftrace_enabled)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);

	mutex_unlock(&ftrace_lock);

 out_regex_unlock:
	mutex_unlock(&ftrace_regex_lock);

	free_ftrace_hash(hash);
	return ret;
}
3190
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
Steven Rostedt4eebcc82008-05-12 21:20:48 +02003207
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset)
{
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3240
/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
Steven Rostedt77a2b372008-05-12 21:20:45 +02003257
Steven Rostedt2af15d62009-05-28 13:37:24 -04003258/*
3259 * command line interface to allow users to set filters on boot up.
3260 */
3261#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
3262static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3263static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3264
3265static int __init set_ftrace_notrace(char *str)
3266{
3267 strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3268 return 1;
3269}
3270__setup("ftrace_notrace=", set_ftrace_notrace);
3271
3272static int __init set_ftrace_filter(char *str)
3273{
3274 strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3275 return 1;
3276}
3277__setup("ftrace_filter=", set_ftrace_filter);
3278
Stefan Assmann369bc182009-10-12 22:17:21 +02003279#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Lai Jiangshanf6060f42009-11-05 11:16:17 +08003280static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
Steven Rostedt801c29f2010-03-05 20:02:19 -05003281static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3282
/*
 * "ftrace_graph_filter=" boot parameter: saved here and applied by
 * set_ftrace_early_filters() via set_ftrace_early_graph().
 */
static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
3289
/*
 * Apply the boot-time graph filter list: each comma-separated token in
 * @buf is added to ftrace_graph_funcs via ftrace_set_func().  @buf is
 * consumed destructively by strsep().
 */
static void __init set_ftrace_early_graph(char *buf)
{
	int ret;
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
			       "traceable\n", func);
	}
}
3305#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3306
Steven Rostedt2a85a372011-12-19 21:57:44 -05003307void __init
3308ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
Steven Rostedt2af15d62009-05-28 13:37:24 -04003309{
3310 char *func;
3311
3312 while (buf) {
3313 func = strsep(&buf, ",");
Steven Rostedtf45948e2011-05-02 12:29:25 -04003314 ftrace_set_regex(ops, func, strlen(func), 0, enable);
Steven Rostedt2af15d62009-05-28 13:37:24 -04003315 }
3316}
3317
/*
 * Called from ftrace_init(): apply any filter/notrace/graph lists that
 * were captured from the kernel command line before ftrace was up.
 */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
3329
/*
 * Release handler shared by the set_ftrace_filter/set_ftrace_notrace
 * files.  Flushes any partially-parsed pattern still sitting in the
 * trace_parser, and — for writers — commits the iterator's staged hash
 * into the ops' live filter or notrace hash under ftrace_lock.
 */
int ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		/* Readers went through seq_open(); iter hangs off seq_file */
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		/* Commit a final pattern that was not newline-terminated */
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->filter_hash;
		else
			orig_hash = &iter->ops->notrace_hash;

		mutex_lock(&ftrace_lock);
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
		    && ftrace_enabled)
			/* Live ops: re-patch call sites for the new hash */
			ftrace_run_update_code(FTRACE_UPDATE_CALLS);

		mutex_unlock(&ftrace_lock);
	}
	free_ftrace_hash(iter->hash);
	kfree(iter);

	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
3378
/* "available_filter_functions": read-only list of traceable functions */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* "enabled_functions": read-only list of currently enabled records */
static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* "set_ftrace_filter": read/write; writes staged via ftrace_filter_write */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

/* "set_ftrace_notrace": read/write counterpart for the notrace hash */
static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
3408
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003409#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3410
3411static DEFINE_MUTEX(graph_lock);
3412
3413int ftrace_graph_count;
Li Zefanc7c6b1f2010-02-10 15:43:04 +08003414int ftrace_graph_filter_enabled;
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003415unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3416
/*
 * seq_file helper: return a pointer to the *pos'th entry of
 * ftrace_graph_funcs, or NULL when past the end.  Caller holds
 * graph_lock (taken in g_start()).
 */
static void *
__g_next(struct seq_file *m, loff_t *pos)
{
	if (*pos >= ftrace_graph_count)
		return NULL;
	return &ftrace_graph_funcs[*pos];
}

/* seq_file .next: advance position and fetch the next entry */
static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __g_next(m, pos);
}
3431
/*
 * seq_file .start: take graph_lock for the duration of the walk
 * (released in g_stop()).  Returns the sentinel (void *)1 when no
 * graph filter is set, which g_show() renders as "all enabled".
 */
static void *g_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_filter_enabled && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}
3442
/* seq_file .stop: drop the lock taken in g_start() */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
3447
/*
 * seq_file .show: print one graph-filter entry.  @v is either a real
 * pointer into ftrace_graph_funcs or the (void *)1 sentinel produced
 * by g_start() when no filter has been installed.
 */
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *entry = v;

	if (!entry)
		return 0;

	if (entry == (unsigned long *)1)
		seq_printf(m, "#### all functions enabled ####\n");
	else
		seq_printf(m, "%ps\n", (void *)*entry);

	return 0;
}
3464
/* Iterator ops for reading "set_graph_function" as a seq_file */
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
3471
/*
 * Open handler for "set_graph_function".  O_TRUNC writers clear the
 * whole graph filter under graph_lock; readers get a seq_file walk
 * over the current entries.
 */
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		/* Truncating open resets the filter completely */
		ftrace_graph_filter_enabled = 0;
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_graph_seq_ops);

	return ret;
}
3494
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003495static int
Li Zefan87827112009-07-23 11:29:11 +08003496ftrace_graph_release(struct inode *inode, struct file *file)
3497{
3498 if (file->f_mode & FMODE_READ)
3499 seq_release(inode, file);
3500 return 0;
3501}
3502
/*
 * Add or remove functions matching @buffer to/from the graph filter
 * @array (of current length *@idx).  A leading '!' in the pattern
 * (reported via 'not' by filter_parse_regex) removes matches instead
 * of adding them; removal compacts the array by moving the last entry
 * into the freed slot.
 *
 * Returns 0 if at least one record matched, -EBUSY if the array is
 * already full, -EINVAL if nothing matched, -ENODEV if ftrace is off.
 */
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;	/* cleared as soon as any record matches */
	int type, not;
	char *search;
	bool exists;
	int i;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
						goto out;
				}
			} else {
				if (exists) {
					/* swap-remove: keep array dense */
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	ftrace_graph_filter_enabled = 1;
	return 0;
}
3566
/*
 * Write handler for "set_graph_function": parse one expression from
 * the user buffer and feed it to ftrace_set_func() under graph_lock.
 * On success returns the number of bytes consumed.
 */
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		parser.buffer[parser.idx] = 0;

		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      parser.buffer);
		if (ret)
			goto out_free;
	}

	ret = read;

out_free:
	trace_parser_put(&parser);
out_unlock:
	mutex_unlock(&graph_lock);

	return ret;
}
3605
/* "set_graph_function": readable (seq_file) and writable (add patterns) */
static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.release	= ftrace_graph_release,
	.llseek		= seq_lseek,
};
3613#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3614
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003615static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
Steven Rostedt5072c592008-05-12 21:20:43 +02003616{
Steven Rostedt5072c592008-05-12 21:20:43 +02003617
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003618 trace_create_file("available_filter_functions", 0444,
3619 d_tracer, NULL, &ftrace_avail_fops);
Steven Rostedt5072c592008-05-12 21:20:43 +02003620
Steven Rostedt647bcd02011-05-03 14:39:21 -04003621 trace_create_file("enabled_functions", 0444,
3622 d_tracer, NULL, &ftrace_enabled_fops);
3623
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003624 trace_create_file("set_ftrace_filter", 0644, d_tracer,
3625 NULL, &ftrace_filter_fops);
Steven Rostedt41c52c02008-05-22 11:46:33 -04003626
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003627 trace_create_file("set_ftrace_notrace", 0644, d_tracer,
Steven Rostedt41c52c02008-05-22 11:46:33 -04003628 NULL, &ftrace_notrace_fops);
Steven Rostedtad90c0e2008-05-27 20:48:37 -04003629
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003630#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbecker5452af62009-03-27 00:25:38 +01003631 trace_create_file("set_graph_function", 0444, d_tracer,
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003632 NULL,
3633 &ftrace_graph_fops);
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05003634#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3635
Steven Rostedt5072c592008-05-12 21:20:43 +02003636 return 0;
3637}
3638
/*
 * sort() comparator for mcount call-site addresses: orders the two
 * unsigned longs ascending, returning 1 / -1 / 0 like strcmp.
 */
static int ftrace_cmp_ips(const void *a, const void *b)
{
	unsigned long left = *(const unsigned long *)a;
	unsigned long right = *(const unsigned long *)b;

	/* (x > y) - (x < y) yields exactly 1, -1 or 0 */
	return (left > right) - (left < right);
}
3650
/*
 * sort() swap callback for the mcount address table.  Entries are
 * single unsigned longs, so @size is not needed here.
 */
static void ftrace_swap_ips(void *a, void *b, int size)
{
	unsigned long *left = a;
	unsigned long *right = b;
	unsigned long tmp;

	tmp = *left;
	*left = *right;
	*right = tmp;
}
3661
/*
 * Ingest a table of mcount call-site addresses (@start..@end) for the
 * core kernel (@mod == NULL) or a loadable module, sort it, store the
 * addresses into freshly allocated ftrace_page records, link them into
 * the global page list, and run the initial code update.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or if the core
 * page list is unexpectedly missing for a module load.
 */
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long count;
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */
	int ret = -ENOMEM;

	count = end - start;

	if (!count)
		return 0;

	/* Sorted addresses let record lookup use binary search */
	sort(start, count, sizeof(*start),
	     ftrace_cmp_ips, ftrace_swap_ips);

	start_pg = ftrace_allocate_pages(count);
	if (!start_pg)
		return -ENOMEM;

	mutex_lock(&ftrace_lock);

	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
	if (!mod) {
		WARN_ON(ftrace_pages || ftrace_pages_start);
		/* First initialization */
		ftrace_pages = ftrace_pages_start = start_pg;
	} else {
		if (!ftrace_pages)
			goto out;

		if (WARN_ON(ftrace_pages->next)) {
			/* Hmm, we have free pages? */
			while (ftrace_pages->next)
				ftrace_pages = ftrace_pages->next;
		}

		ftrace_pages->next = start_pg;
	}

	p = start;
	pg = start_pg;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;

		if (pg->index == pg->size) {
			/* We should have allocated enough */
			if (WARN_ON(!pg->next))
				break;
			pg = pg->next;
		}

		rec = &pg->records[pg->index++];
		rec->ip = addr;
	}

	/* We should have used all pages */
	WARN_ON(pg->next);

	/* Assign the last page to ftrace_pages */
	ftrace_pages = pg;

	/* These new locations need to be initialized */
	ftrace_new_pgs = start_pg;

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod);
	if (!mod)
		local_irq_restore(flags);
	ret = 0;
 out:
	mutex_unlock(&ftrace_lock);

	return ret;
}
3763
Steven Rostedt93eb6772009-04-15 13:24:06 -04003764#ifdef CONFIG_MODULES
Steven Rostedt32082302011-12-16 14:42:37 -05003765
3766#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
3767
/*
 * Remove and free all ftrace record pages belonging to @mod when the
 * module is unloaded.  A page is attributed to the module by testing
 * its first record's ip with within_module_core().
 */
void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page **last_pg;
	struct ftrace_page *pg;
	int order;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	/*
	 * Each module has its own ftrace_pages, remove
	 * them from the list.
	 */
	last_pg = &ftrace_pages_start;
	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
		rec = &pg->records[0];
		if (within_module_core(rec->ip, mod)) {
			/*
			 * As core pages are first, the first
			 * page should never be a module page.
			 */
			if (WARN_ON(pg == ftrace_pages_start))
				goto out_unlock;

			/* Check if we are deleting the last page */
			if (pg == ftrace_pages)
				ftrace_pages = next_to_ftrace_page(last_pg);

			/* Unlink and free the record storage and the page */
			*last_pg = pg->next;
			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
			free_pages((unsigned long)pg->records, order);
			kfree(pg);
		} else
			last_pg = &pg->next;
	}
 out_unlock:
	mutex_unlock(&ftrace_lock);
}
3809
/*
 * Register a loading module's mcount call sites ([@start, @end)) with
 * ftrace.  No-op when ftrace is disabled or the table is empty.
 */
static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_process_locs(mod, start, end);
}
3817
Steven Rostedt93eb6772009-04-15 13:24:06 -04003818static int ftrace_module_notify(struct notifier_block *self,
3819 unsigned long val, void *data)
3820{
3821 struct module *mod = data;
3822
3823 switch (val) {
3824 case MODULE_STATE_COMING:
3825 ftrace_init_module(mod, mod->ftrace_callsites,
3826 mod->ftrace_callsites +
3827 mod->num_ftrace_callsites);
3828 break;
3829 case MODULE_STATE_GOING:
jolsa@redhat.come7247a12009-10-07 19:00:35 +02003830 ftrace_release_mod(mod);
Steven Rostedt93eb6772009-04-15 13:24:06 -04003831 break;
3832 }
3833
3834 return 0;
3835}
3836#else
/* !CONFIG_MODULES stub: nothing to track, always NOTIFY_DONE */
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
3842#endif /* CONFIG_MODULES */
3843
/* Registered in ftrace_init() to follow module load/unload events */
struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
	.priority = 0,
};
3848
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003849extern unsigned long __start_mcount_loc[];
3850extern unsigned long __stop_mcount_loc[];
3851
/*
 * Boot-time initialization of dynamic ftrace: let the architecture
 * prepare, allocate the record table, ingest the core kernel's
 * __mcount_loc section, hook module load/unload, and apply any
 * command-line filters.  On any failure, ftrace is disabled entirely.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/*
	 * NOTE(review): this return value is overwritten by the
	 * register_module_notifier() call below without being checked —
	 * looks intentional (best effort) but worth confirming.
	 */
	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module notifier\n");

	/* Apply "ftrace_filter="/"ftrace_notrace=" boot parameters */
	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
Steven Rostedt68bf21a2008-08-14 15:45:08 -04003890
Steven Rostedt3d083392008-05-12 21:20:42 +02003891#else
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01003892
Steven Rostedt2b499382011-05-03 22:49:52 -04003893static struct ftrace_ops global_ops = {
Steven Rostedtbd69c302011-05-03 21:55:54 -04003894 .func = ftrace_stub,
3895};
3896
Frederic Weisbecker0b6e4d52008-10-28 20:17:38 +01003897static int __init ftrace_nodyn_init(void)
3898{
3899 ftrace_enabled = 1;
3900 return 0;
3901}
3902device_initcall(ftrace_nodyn_init);
3903
Steven Rostedtdf4fc312008-11-26 00:16:23 -05003904static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3905static inline void ftrace_startup_enable(int command) { }
Steven Rostedt5a45cfe2008-11-26 00:16:24 -05003906/* Keep as macros so we do not need to define the commands */
Steven Rostedt3b6cfdb2011-05-23 15:33:49 -04003907# define ftrace_startup(ops, command) \
3908 ({ \
3909 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
3910 0; \
3911 })
Steven Rostedtbd69c302011-05-03 21:55:54 -04003912# define ftrace_shutdown(ops, command) do { } while (0)
Ingo Molnarc7aafc52008-05-12 21:20:45 +02003913# define ftrace_startup_sysctl() do { } while (0)
3914# define ftrace_shutdown_sysctl() do { } while (0)
Steven Rostedtb8489142011-05-04 09:27:52 -04003915
3916static inline int
3917ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3918{
3919 return 1;
3920}
3921
Steven Rostedt3d083392008-05-12 21:20:42 +02003922#endif /* CONFIG_DYNAMIC_FTRACE */
3923
Steven Rostedtb8489142011-05-04 09:27:52 -04003924static void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04003925ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04003926 struct ftrace_ops *op, struct pt_regs *regs)
Jiri Olsae2484912012-02-15 15:51:48 +01003927{
Jiri Olsae2484912012-02-15 15:51:48 +01003928 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
3929 return;
3930
3931 /*
3932 * Some of the ops may be dynamically allocated,
3933 * they must be freed after a synchronize_sched().
3934 */
3935 preempt_disable_notrace();
3936 trace_recursion_set(TRACE_CONTROL_BIT);
3937 op = rcu_dereference_raw(ftrace_control_list);
3938 while (op != &ftrace_list_end) {
3939 if (!ftrace_function_local_disabled(op) &&
3940 ftrace_ops_test(op, ip))
Steven Rostedta1e2e312011-08-09 12:50:46 -04003941 op->func(ip, parent_ip, op, regs);
Jiri Olsae2484912012-02-15 15:51:48 +01003942
3943 op = rcu_dereference_raw(op->next);
3944 };
3945 trace_recursion_clear(TRACE_CONTROL_BIT);
3946 preempt_enable_notrace();
3947}
3948
/* Single ops registered on behalf of all control ops on the list. */
static struct ftrace_ops control_ops = {
	.func = ftrace_ops_control_func,
};
3952
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04003953static inline void
3954__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04003955 struct ftrace_ops *ignored, struct pt_regs *regs)
Steven Rostedtb8489142011-05-04 09:27:52 -04003956{
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003957 struct ftrace_ops *op;
Steven Rostedtb8489142011-05-04 09:27:52 -04003958
Steven Rostedtccf36722012-06-05 09:44:25 -04003959 if (function_trace_stop)
3960 return;
3961
Steven Rostedtb1cff0a2011-05-25 14:27:43 -04003962 if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3963 return;
3964
3965 trace_recursion_set(TRACE_INTERNAL_BIT);
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003966 /*
3967 * Some of the ops may be dynamically allocated,
3968 * they must be freed after a synchronize_sched().
3969 */
3970 preempt_disable_notrace();
3971 op = rcu_dereference_raw(ftrace_ops_list);
Steven Rostedtb8489142011-05-04 09:27:52 -04003972 while (op != &ftrace_list_end) {
3973 if (ftrace_ops_test(op, ip))
Steven Rostedta1e2e312011-08-09 12:50:46 -04003974 op->func(ip, parent_ip, op, regs);
Steven Rostedtb8489142011-05-04 09:27:52 -04003975 op = rcu_dereference_raw(op->next);
3976 };
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04003977 preempt_enable_notrace();
Steven Rostedtb1cff0a2011-05-25 14:27:43 -04003978 trace_recursion_clear(TRACE_INTERNAL_BIT);
Steven Rostedtb8489142011-05-04 09:27:52 -04003979}
3980
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04003981/*
3982 * Some archs only support passing ip and parent_ip. Even though
3983 * the list function ignores the op parameter, we do not want any
3984 * C side effects, where a function is called without the caller
3985 * sending a third parameter.
Steven Rostedta1e2e312011-08-09 12:50:46 -04003986 * Archs are to support both the regs and ftrace_ops at the same time.
3987 * If they support ftrace_ops, it is assumed they support regs.
3988 * If call backs want to use regs, they must either check for regs
3989 * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
3990 * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved.
3991 * An architecture can pass partial regs with ftrace_ops and still
3992 * set the ARCH_SUPPORT_FTARCE_OPS.
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04003993 */
3994#if ARCH_SUPPORTS_FTRACE_OPS
/* Arch supports full ops/regs signature: forward regs to the walker. */
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
4000#else
/* Arch passes only ip/parent_ip: callbacks see a NULL regs pointer. */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
4005#endif
4006
Steven Rostedte32d8952008-12-04 00:26:41 -05004007static void clear_ftrace_swapper(void)
4008{
4009 struct task_struct *p;
4010 int cpu;
4011
4012 get_online_cpus();
4013 for_each_online_cpu(cpu) {
4014 p = idle_task(cpu);
4015 clear_tsk_trace_trace(p);
4016 }
4017 put_online_cpus();
4018}
4019
4020static void set_ftrace_swapper(void)
4021{
4022 struct task_struct *p;
4023 int cpu;
4024
4025 get_online_cpus();
4026 for_each_online_cpu(cpu) {
4027 p = idle_task(cpu);
4028 set_tsk_trace_trace(p);
4029 }
4030 put_online_cpus();
4031}
4032
/*
 * Clear the trace flag on every task attached to @pid, then drop the
 * reference that was taken when the pid entered the filter list
 * (see set_ftrace_pid(), which keeps the reference instead).
 */
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}
4045
Steven Rostedte32d8952008-12-04 00:26:41 -05004046static void set_ftrace_pid(struct pid *pid)
Steven Rostedt978f3a42008-12-04 00:26:40 -05004047{
4048 struct task_struct *p;
4049
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01004050 rcu_read_lock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05004051 do_each_pid_task(pid, PIDTYPE_PID, p) {
4052 set_tsk_trace_trace(p);
4053 } while_each_pid_task(pid, PIDTYPE_PID, p);
Oleg Nesterov229c4ef2009-02-03 20:39:04 +01004054 rcu_read_unlock();
Steven Rostedt978f3a42008-12-04 00:26:40 -05004055}
4056
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004057static void clear_ftrace_pid_task(struct pid *pid)
Steven Rostedte32d8952008-12-04 00:26:41 -05004058{
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004059 if (pid == ftrace_swapper_pid)
Steven Rostedte32d8952008-12-04 00:26:41 -05004060 clear_ftrace_swapper();
4061 else
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004062 clear_ftrace_pid(pid);
Steven Rostedte32d8952008-12-04 00:26:41 -05004063}
4064
4065static void set_ftrace_pid_task(struct pid *pid)
4066{
4067 if (pid == ftrace_swapper_pid)
4068 set_ftrace_swapper();
4069 else
4070 set_ftrace_pid(pid);
4071}
4072
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004073static int ftrace_pid_add(int p)
4074{
4075 struct pid *pid;
4076 struct ftrace_pid *fpid;
4077 int ret = -EINVAL;
4078
4079 mutex_lock(&ftrace_lock);
4080
4081 if (!p)
4082 pid = ftrace_swapper_pid;
4083 else
4084 pid = find_get_pid(p);
4085
4086 if (!pid)
4087 goto out;
4088
4089 ret = 0;
4090
4091 list_for_each_entry(fpid, &ftrace_pids, list)
4092 if (fpid->pid == pid)
4093 goto out_put;
4094
4095 ret = -ENOMEM;
4096
4097 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4098 if (!fpid)
4099 goto out_put;
4100
4101 list_add(&fpid->list, &ftrace_pids);
4102 fpid->pid = pid;
4103
4104 set_ftrace_pid_task(pid);
4105
4106 ftrace_update_pid_func();
4107 ftrace_startup_enable(0);
4108
4109 mutex_unlock(&ftrace_lock);
4110 return 0;
4111
4112out_put:
4113 if (pid != ftrace_swapper_pid)
4114 put_pid(pid);
4115
4116out:
4117 mutex_unlock(&ftrace_lock);
4118 return ret;
4119}
4120
/*
 * Empty the set_ftrace_pid filter list: clear the trace flag on all
 * affected tasks (dropping the pid references) and free the entries,
 * then let the trace function revert to unfiltered mode.
 */
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
}
4140
/*
 * seq_file ->start for set_ftrace_pid.  Holds ftrace_lock until
 * fpid_stop().  An empty list at position 0 returns the sentinel
 * (void *)1 so fpid_show() can print "no pid".
 */
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}
4150
/*
 * seq_file ->next: the "no pid" sentinel has no successor; otherwise
 * advance through the ftrace_pids list.
 */
static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}
4158
/* seq_file ->stop: release the lock taken in fpid_start(). */
static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
4163
4164static int fpid_show(struct seq_file *m, void *v)
4165{
4166 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4167
4168 if (v == (void *)1) {
4169 seq_printf(m, "no pid\n");
4170 return 0;
4171 }
4172
4173 if (fpid->pid == ftrace_swapper_pid)
4174 seq_printf(m, "swapper tasks\n");
4175 else
4176 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4177
4178 return 0;
4179}
4180
/* seq_file iterator for reading the set_ftrace_pid file. */
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
4187
4188static int
4189ftrace_pid_open(struct inode *inode, struct file *file)
4190{
4191 int ret = 0;
4192
4193 if ((file->f_mode & FMODE_WRITE) &&
4194 (file->f_flags & O_TRUNC))
4195 ftrace_pid_reset();
4196
4197 if (file->f_mode & FMODE_READ)
4198 ret = seq_open(file, &ftrace_pid_sops);
4199
4200 return ret;
4201}
4202
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004203static ssize_t
4204ftrace_pid_write(struct file *filp, const char __user *ubuf,
4205 size_t cnt, loff_t *ppos)
4206{
Ingo Molnar457dc922009-11-23 11:03:28 +01004207 char buf[64], *tmp;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004208 long val;
4209 int ret;
4210
4211 if (cnt >= sizeof(buf))
4212 return -EINVAL;
4213
4214 if (copy_from_user(&buf, ubuf, cnt))
4215 return -EFAULT;
4216
4217 buf[cnt] = 0;
4218
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004219 /*
4220 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4221 * to clean the filter quietly.
4222 */
Ingo Molnar457dc922009-11-23 11:03:28 +01004223 tmp = strstrip(buf);
4224 if (strlen(tmp) == 0)
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004225 return 1;
4226
Ingo Molnar457dc922009-11-23 11:03:28 +01004227 ret = strict_strtol(tmp, 10, &val);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004228 if (ret < 0)
4229 return ret;
4230
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004231 ret = ftrace_pid_add(val);
Steven Rostedt978f3a42008-12-04 00:26:40 -05004232
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004233 return ret ? ret : cnt;
4234}
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004235
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004236static int
4237ftrace_pid_release(struct inode *inode, struct file *file)
4238{
4239 if (file->f_mode & FMODE_READ)
4240 seq_release(inode, file);
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004241
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004242 return 0;
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004243}
4244
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004245static const struct file_operations ftrace_pid_fops = {
jolsa@redhat.com756d17e2009-10-13 16:33:52 -04004246 .open = ftrace_pid_open,
4247 .write = ftrace_pid_write,
4248 .read = seq_read,
4249 .llseek = seq_lseek,
4250 .release = ftrace_pid_release,
Steven Rostedtdf4fc312008-11-26 00:16:23 -05004251};
4252
/*
 * Create ftrace's debugfs files: the dynamic-ftrace files (a no-op
 * stub without CONFIG_DYNAMIC_FTRACE), set_ftrace_pid, and the
 * function-profile files.
 */
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);
4271
Steven Rostedt3d083392008-05-12 21:20:42 +02004272/**
Steven Rostedt81adbdc2008-10-23 09:33:02 -04004273 * ftrace_kill - kill ftrace
Steven Rostedta2bb6a32008-07-10 20:58:15 -04004274 *
4275 * This function should be used by panic code. It stops ftrace
4276 * but in a not so nice way. If you need to simply kill ftrace
4277 * from a non-atomic section, use ftrace_kill.
4278 */
Steven Rostedt81adbdc2008-10-23 09:33:02 -04004279void ftrace_kill(void)
Steven Rostedta2bb6a32008-07-10 20:58:15 -04004280{
4281 ftrace_disabled = 1;
4282 ftrace_enabled = 0;
Steven Rostedta2bb6a32008-07-10 20:58:15 -04004283 clear_ftrace_function();
4284}
4285
4286/**
Steven Rostedte0a413f2011-09-29 21:26:16 -04004287 * Test if ftrace is dead or not.
4288 */
4289int ftrace_is_dead(void)
4290{
4291 return ftrace_disabled;
4292}
4293
4294/**
Steven Rostedt3d083392008-05-12 21:20:42 +02004295 * register_ftrace_function - register a function for profiling
4296 * @ops - ops structure that holds the function for profiling.
4297 *
4298 * Register a function to be called by all functions in the
4299 * kernel.
4300 *
4301 * Note: @ops->func and all the functions it calls must be labeled
4302 * with "notrace", otherwise it will go into a
4303 * recursive loop.
4304 */
4305int register_ftrace_function(struct ftrace_ops *ops)
4306{
Steven Rostedt45a4a232011-04-21 23:16:46 -04004307 int ret = -1;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02004308
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004309 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004310
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004311 ret = __register_ftrace_function(ops);
Steven Rostedtb8489142011-05-04 09:27:52 -04004312 if (!ret)
Steven Rostedta1cd6172011-05-23 15:24:25 -04004313 ret = ftrace_startup(ops, 0);
Steven Rostedtb8489142011-05-04 09:27:52 -04004314
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004315 mutex_unlock(&ftrace_lock);
Borislav Petkov8d240dd2012-03-29 19:11:40 +02004316
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004317 return ret;
Steven Rostedt3d083392008-05-12 21:20:42 +02004318}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04004319EXPORT_SYMBOL_GPL(register_ftrace_function);
Steven Rostedt3d083392008-05-12 21:20:42 +02004320
4321/**
Uwe Kleine-Koenig32632922009-01-12 23:35:50 +01004322 * unregister_ftrace_function - unregister a function for profiling.
Steven Rostedt3d083392008-05-12 21:20:42 +02004323 * @ops - ops structure that holds the function to unregister
4324 *
4325 * Unregister a function that was added to be called by ftrace profiling.
4326 */
4327int unregister_ftrace_function(struct ftrace_ops *ops)
4328{
4329 int ret;
4330
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004331 mutex_lock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02004332 ret = __unregister_ftrace_function(ops);
Steven Rostedtb8489142011-05-04 09:27:52 -04004333 if (!ret)
4334 ftrace_shutdown(ops, 0);
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004335 mutex_unlock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004336
4337 return ret;
4338}
Steven Rostedtcdbe61b2011-05-05 21:14:55 -04004339EXPORT_SYMBOL_GPL(unregister_ftrace_function);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004340
Ingo Molnare309b412008-05-12 21:20:51 +02004341int
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004342ftrace_enable_sysctl(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07004343 void __user *buffer, size_t *lenp,
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004344 loff_t *ppos)
4345{
Steven Rostedt45a4a232011-04-21 23:16:46 -04004346 int ret = -ENODEV;
Steven Rostedt4eebcc82008-05-12 21:20:48 +02004347
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004348 mutex_lock(&ftrace_lock);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004349
Steven Rostedt45a4a232011-04-21 23:16:46 -04004350 if (unlikely(ftrace_disabled))
4351 goto out;
4352
4353 ret = proc_dointvec(table, write, buffer, lenp, ppos);
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004354
Li Zefana32c7762009-06-26 16:55:51 +08004355 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004356 goto out;
4357
Li Zefana32c7762009-06-26 16:55:51 +08004358 last_ftrace_enabled = !!ftrace_enabled;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004359
4360 if (ftrace_enabled) {
4361
4362 ftrace_startup_sysctl();
4363
4364 /* we are starting ftrace again */
Steven Rostedtb8489142011-05-04 09:27:52 -04004365 if (ftrace_ops_list != &ftrace_list_end) {
4366 if (ftrace_ops_list->next == &ftrace_list_end)
4367 ftrace_trace_function = ftrace_ops_list->func;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004368 else
Steven Rostedtb8489142011-05-04 09:27:52 -04004369 ftrace_trace_function = ftrace_ops_list_func;
Steven Rostedtb0fc4942008-05-12 21:20:43 +02004370 }
4371
4372 } else {
4373 /* stopping ftrace calls (just send to ftrace_stub) */
4374 ftrace_trace_function = ftrace_stub;
4375
4376 ftrace_shutdown_sysctl();
4377 }
4378
4379 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004380 mutex_unlock(&ftrace_lock);
Steven Rostedt3d083392008-05-12 21:20:42 +02004381 return ret;
Arnaldo Carvalho de Melo16444a82008-05-12 21:20:42 +02004382}
Ingo Molnarf17845e2008-10-24 12:47:10 +02004383
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004384#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004385
Steven Rostedt597af812009-04-03 15:24:12 -04004386static int ftrace_graph_active;
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004387static struct notifier_block ftrace_suspend_notifier;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004388
Steven Rostedte49dc192008-12-02 23:50:05 -05004389int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4390{
4391 return 0;
4392}
4393
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004394/* The callbacks that hook a function */
4395trace_func_graph_ret_t ftrace_graph_return =
4396 (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05004397trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004398
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
/*
 * Allocates a batch of return stacks outside tasklist_lock, then hands
 * them out to tasks that do not have one yet.  If the batch runs out
 * mid-walk, returns -EAGAIN so the caller retries with a fresh batch;
 * leftover stacks are freed on exit.  @ret_stack_list must hold
 * FTRACE_RETSTACK_ALLOC_SIZE slots.
 */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			/* free only the part we managed to allocate */
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			/* batch exhausted; caller will retry */
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
4444
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004445static void
Steven Rostedt38516ab2010-04-20 17:04:50 -04004446ftrace_graph_probe_sched_switch(void *ignore,
4447 struct task_struct *prev, struct task_struct *next)
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004448{
4449 unsigned long long timestamp;
4450 int index;
4451
Steven Rostedtbe6f1642009-03-24 11:06:24 -04004452 /*
4453 * Does the user want to count the time a function was asleep.
4454 * If so, do not update the time stamps.
4455 */
4456 if (trace_flags & TRACE_ITER_SLEEP_TIME)
4457 return;
4458
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004459 timestamp = trace_clock_local();
4460
4461 prev->ftrace_timestamp = timestamp;
4462
4463 /* only process tasks that we timestamped */
4464 if (!next->ftrace_timestamp)
4465 return;
4466
4467 /*
4468 * Update all the counters in next to make up for the
4469 * time next was sleeping.
4470 */
4471 timestamp -= next->ftrace_timestamp;
4472
4473 for (index = next->curr_ret_stack; index >= 0; index--)
4474 next->ret_stack[index].calltime += timestamp;
4475}
4476
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004477/* Allocate a return stack for each task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004478static int start_graph_tracing(void)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004479{
4480 struct ftrace_ret_stack **ret_stack_list;
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004481 int ret, cpu;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004482
4483 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4484 sizeof(struct ftrace_ret_stack *),
4485 GFP_KERNEL);
4486
4487 if (!ret_stack_list)
4488 return -ENOMEM;
4489
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004490 /* The cpu_boot init_task->ret_stack will never be freed */
Steven Rostedt179c4982009-06-02 12:03:19 -04004491 for_each_online_cpu(cpu) {
4492 if (!idle_task(cpu)->ret_stack)
Steven Rostedt868baf02011-02-10 21:26:13 -05004493 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
Steven Rostedt179c4982009-06-02 12:03:19 -04004494 }
Frederic Weisbecker5b058bc2009-02-17 18:35:34 +01004495
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004496 do {
4497 ret = alloc_retstack_tasklist(ret_stack_list);
4498 } while (ret == -EAGAIN);
4499
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004500 if (!ret) {
Steven Rostedt38516ab2010-04-20 17:04:50 -04004501 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
Steven Rostedt8aef2d22009-03-24 01:10:15 -04004502 if (ret)
4503 pr_info("ftrace_graph: Couldn't activate tracepoint"
4504 " probe to kernel_sched_switch\n");
4505 }
4506
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004507 kfree(ret_stack_list);
4508 return ret;
4509}
4510
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004511/*
4512 * Hibernation protection.
4513 * The state of the current task is too much unstable during
4514 * suspend/restore to disk. We want to protect against that.
4515 */
4516static int
4517ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4518 void *unused)
4519{
4520 switch (state) {
4521 case PM_HIBERNATION_PREPARE:
4522 pause_graph_tracing();
4523 break;
4524
4525 case PM_POST_HIBERNATION:
4526 unpause_graph_tracing();
4527 break;
4528 }
4529 return NOTIFY_DONE;
4530}
4531
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004532int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4533 trace_func_graph_ent_t entryfunc)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004534{
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004535 int ret = 0;
4536
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004537 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004538
Steven Rostedt05ce5812009-03-24 00:18:31 -04004539 /* we currently allow only one tracer registered at a time */
Steven Rostedt597af812009-04-03 15:24:12 -04004540 if (ftrace_graph_active) {
Steven Rostedt05ce5812009-03-24 00:18:31 -04004541 ret = -EBUSY;
4542 goto out;
4543 }
4544
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004545 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4546 register_pm_notifier(&ftrace_suspend_notifier);
4547
Steven Rostedt597af812009-04-03 15:24:12 -04004548 ftrace_graph_active++;
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004549 ret = start_graph_tracing();
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004550 if (ret) {
Steven Rostedt597af812009-04-03 15:24:12 -04004551 ftrace_graph_active--;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004552 goto out;
4553 }
Steven Rostedte53a6312008-11-26 00:16:25 -05004554
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004555 ftrace_graph_return = retfunc;
4556 ftrace_graph_entry = entryfunc;
Steven Rostedte53a6312008-11-26 00:16:25 -05004557
Steven Rostedta1cd6172011-05-23 15:24:25 -04004558 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004559
4560out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004561 mutex_unlock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004562 return ret;
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004563}
4564
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004565void unregister_ftrace_graph(void)
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004566{
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004567 mutex_lock(&ftrace_lock);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004568
Steven Rostedt597af812009-04-03 15:24:12 -04004569 if (unlikely(!ftrace_graph_active))
Steven Rostedt2aad1b72009-03-30 11:11:28 -04004570 goto out;
4571
Steven Rostedt597af812009-04-03 15:24:12 -04004572 ftrace_graph_active--;
Frederic Weisbecker287b6e62008-11-26 00:57:25 +01004573 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
Steven Rostedte49dc192008-12-02 23:50:05 -05004574 ftrace_graph_entry = ftrace_graph_entry_stub;
Steven Rostedtbd69c302011-05-03 21:55:54 -04004575 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
Frederic Weisbecker4a2b8dd2009-01-14 13:33:27 -08004576 unregister_pm_notifier(&ftrace_suspend_notifier);
Steven Rostedt38516ab2010-04-20 17:04:50 -04004577 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
Frederic Weisbeckere7d37372008-11-16 06:02:06 +01004578
Steven Rostedt2aad1b72009-03-30 11:11:28 -04004579 out:
Steven Rostedte6ea44e2009-02-14 01:42:44 -05004580 mutex_unlock(&ftrace_lock);
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004581}
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004582
Steven Rostedt868baf02011-02-10 21:26:13 -05004583static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4584
4585static void
4586graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4587{
4588 atomic_set(&t->tracing_graph_pause, 0);
4589 atomic_set(&t->trace_overrun, 0);
4590 t->ftrace_timestamp = 0;
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004591 /* make curr_ret_stack visible before we add the ret_stack */
Steven Rostedt868baf02011-02-10 21:26:13 -05004592 smp_wmb();
4593 t->ret_stack = ret_stack;
4594}
4595
4596/*
4597 * Allocate a return stack for the idle task. May be the first
4598 * time through, or it may be done by CPU hotplug online.
4599 */
4600void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4601{
4602 t->curr_ret_stack = -1;
4603 /*
4604 * The idle task has no parent, it either has its own
4605 * stack or no stack at all.
4606 */
4607 if (t->ret_stack)
4608 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4609
4610 if (ftrace_graph_active) {
4611 struct ftrace_ret_stack *ret_stack;
4612
4613 ret_stack = per_cpu(idle_ret_stack, cpu);
4614 if (!ret_stack) {
4615 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4616 * sizeof(struct ftrace_ret_stack),
4617 GFP_KERNEL);
4618 if (!ret_stack)
4619 return;
4620 per_cpu(idle_ret_stack, cpu) = ret_stack;
4621 }
4622 graph_init_task(t, ret_stack);
4623 }
4624}
4625
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004626/* Allocate a return stack for newly created task */
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004627void ftrace_graph_init_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004628{
Steven Rostedt84047e32009-06-02 16:51:55 -04004629 /* Make sure we do not use the parent ret_stack */
4630 t->ret_stack = NULL;
Steven Rostedtea14eb72010-03-12 19:41:23 -05004631 t->curr_ret_stack = -1;
Steven Rostedt84047e32009-06-02 16:51:55 -04004632
Steven Rostedt597af812009-04-03 15:24:12 -04004633 if (ftrace_graph_active) {
Steven Rostedt82310a32009-06-02 12:26:07 -04004634 struct ftrace_ret_stack *ret_stack;
4635
4636 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004637 * sizeof(struct ftrace_ret_stack),
4638 GFP_KERNEL);
Steven Rostedt82310a32009-06-02 12:26:07 -04004639 if (!ret_stack)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004640 return;
Steven Rostedt868baf02011-02-10 21:26:13 -05004641 graph_init_task(t, ret_stack);
Steven Rostedt84047e32009-06-02 16:51:55 -04004642 }
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004643}
4644
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01004645void ftrace_graph_exit_task(struct task_struct *t)
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004646{
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01004647 struct ftrace_ret_stack *ret_stack = t->ret_stack;
4648
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004649 t->ret_stack = NULL;
Frederic Weisbeckereae849c2008-11-23 17:33:12 +01004650 /* NULL must become visible to IRQs before we free it: */
4651 barrier();
4652
4653 kfree(ret_stack);
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01004654}
Steven Rostedt14a866c2008-12-02 23:50:02 -05004655
/* Emergency stop used by the graph tracer when it detects corruption. */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
Frederic Weisbecker15e6cb32008-11-11 07:14:25 +01004660#endif