blob: 22826c73a9da600992184fe8d5ca75e25f26346e [file] [log] [blame]
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001/*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
Steven Rostedt981d0812009-03-02 13:53:59 -05006 * - Added format output of fields of the trace point.
7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8 *
Steven Rostedtb77e38a2009-02-24 10:21:36 -05009 */
10
Steven Rostedte6187002009-04-15 13:36:40 -040011#include <linux/workqueue.h>
12#include <linux/spinlock.h>
13#include <linux/kthread.h>
Steven Rostedtb77e38a2009-02-24 10:21:36 -050014#include <linux/debugfs.h>
15#include <linux/uaccess.h>
16#include <linux/module.h>
17#include <linux/ctype.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090018#include <linux/slab.h>
Steven Rostedte6187002009-04-15 13:36:40 -040019#include <linux/delay.h>
Steven Rostedtb77e38a2009-02-24 10:21:36 -050020
Li Zefan020e5f82009-07-01 10:47:05 +080021#include <asm/setup.h>
22
Steven Rostedt91729ef2009-03-02 15:03:01 -050023#include "trace_output.h"
Steven Rostedtb77e38a2009-02-24 10:21:36 -050024
#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

/* Serializes event enable/disable state changes and event-list walks. */
DEFINE_MUTEX(event_mutex);

/* All registered trace event calls. */
LIST_HEAD(ftrace_events);
/* Fields common to every event ("common_type", "common_pid", ...). */
static LIST_HEAD(ftrace_common_fields);

/* Allocations from the caches below come back zeroed. */
#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;	/* struct ftrace_event_field */
static struct kmem_cache *file_cachep;	/* struct ftrace_event_file */

/*
 * Stashed in the top bit of event_subsystem::ref_count to flag that
 * ->name was dynamically allocated and must be freed with the subsystem.
 */
#define SYSTEM_FL_FREE_NAME (1 << 31)
39
40static inline int system_refcount(struct event_subsystem *system)
41{
42 return system->ref_count & ~SYSTEM_FL_FREE_NAME;
43}
44
45static int system_refcount_inc(struct event_subsystem *system)
46{
47 return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
48}
49
50static int system_refcount_dec(struct event_subsystem *system)
51{
52 return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
53}
54
/* Double loops, do not use break, only goto's work */
/*
 * Iterate every event file of every trace array.  The outer loop's
 * opening brace is closed by while_for_each_event_file(), so a bare
 * "break" would only leave the inner loop -- use goto to exit both.
 */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

/* As above, but safe against removal of the current file entry. */
#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

/* Closes the brace opened by do_for_each_event_file{,_safe}(). */
#define while_for_each_event_file()		\
	}
67
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080068static struct list_head *
Steven Rostedt2e33af02010-04-22 10:35:55 -040069trace_get_fields(struct ftrace_event_call *event_call)
70{
71 if (!event_call->class->get_fields)
72 return &event_call->class->fields;
73 return event_call->class->get_fields(event_call);
74}
75
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080076static struct ftrace_event_field *
77__find_event_field(struct list_head *head, char *name)
78{
79 struct ftrace_event_field *field;
80
81 list_for_each_entry(field, head, link) {
82 if (!strcmp(field->name, name))
83 return field;
84 }
85
86 return NULL;
87}
88
89struct ftrace_event_field *
90trace_find_event_field(struct ftrace_event_call *call, char *name)
91{
92 struct ftrace_event_field *field;
93 struct list_head *head;
94
95 field = __find_event_field(&ftrace_common_fields, name);
96 if (field)
97 return field;
98
99 head = trace_get_fields(call);
100 return __find_event_field(head, name);
101}
102
Li Zefan8728fe52010-05-24 16:22:49 +0800103static int __trace_define_field(struct list_head *head, const char *type,
104 const char *name, int offset, int size,
105 int is_signed, int filter_type)
Tom Zanussicf027f62009-03-22 03:30:39 -0500106{
107 struct ftrace_event_field *field;
108
Steven Rostedtd1a29142013-02-27 20:23:57 -0500109 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
Tom Zanussicf027f62009-03-22 03:30:39 -0500110 if (!field)
Namhyung Kimaaf6ac02013-06-07 15:07:48 +0900111 return -ENOMEM;
Ingo Molnarfe9f57f2009-03-22 18:41:59 +0100112
Steven Rostedt92edca02013-02-27 20:41:37 -0500113 field->name = name;
114 field->type = type;
Ingo Molnarfe9f57f2009-03-22 18:41:59 +0100115
Li Zefan43b51ea2009-08-07 10:33:22 +0800116 if (filter_type == FILTER_OTHER)
117 field->filter_type = filter_assign_type(type);
118 else
119 field->filter_type = filter_type;
120
Tom Zanussicf027f62009-03-22 03:30:39 -0500121 field->offset = offset;
122 field->size = size;
Tom Zanussia118e4d2009-04-28 03:04:53 -0500123 field->is_signed = is_signed;
Li Zefanaa38e9f2009-08-07 10:33:02 +0800124
Steven Rostedt2e33af02010-04-22 10:35:55 -0400125 list_add(&field->link, head);
Tom Zanussicf027f62009-03-22 03:30:39 -0500126
127 return 0;
Tom Zanussicf027f62009-03-22 03:30:39 -0500128}
Li Zefan8728fe52010-05-24 16:22:49 +0800129
130int trace_define_field(struct ftrace_event_call *call, const char *type,
131 const char *name, int offset, int size, int is_signed,
132 int filter_type)
133{
134 struct list_head *head;
135
136 if (WARN_ON(!call->class))
137 return 0;
138
139 head = trace_get_fields(call);
140 return __trace_define_field(head, type, name, offset, size,
141 is_signed, filter_type);
142}
Steven Rostedt17c873e2009-04-10 18:12:50 -0400143EXPORT_SYMBOL_GPL(trace_define_field);
Tom Zanussicf027f62009-03-22 03:30:39 -0500144
/*
 * Helper for trace_define_common_fields(): registers one member of
 * struct trace_entry as a "common_<item>" field.  Expects a local
 * "int ret" and "struct trace_entry ent" in scope, and returns from
 * the *enclosing function* on error.
 */
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

/* Register the fields shared by every event (from struct trace_entry). */
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;	/* only used for offsetof/sizeof */

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	/* ret is 0 here: any failure returned early from __common_field. */
	return ret;
}
166
zhangwei(Jovi)ad7067c2013-03-11 15:13:46 +0800167static void trace_destroy_fields(struct ftrace_event_call *call)
Li Zefan2df75e42009-05-06 10:33:04 +0800168{
169 struct ftrace_event_field *field, *next;
Steven Rostedt2e33af02010-04-22 10:35:55 -0400170 struct list_head *head;
Li Zefan2df75e42009-05-06 10:33:04 +0800171
Steven Rostedt2e33af02010-04-22 10:35:55 -0400172 head = trace_get_fields(call);
173 list_for_each_entry_safe(field, next, head, link) {
Li Zefan2df75e42009-05-06 10:33:04 +0800174 list_del(&field->link);
Steven Rostedtd1a29142013-02-27 20:23:57 -0500175 kmem_cache_free(field_cachep, field);
Li Zefan2df75e42009-05-06 10:33:04 +0800176 }
177}
178
Li Zefan87d9b4e2009-12-08 11:14:20 +0800179int trace_event_raw_init(struct ftrace_event_call *call)
180{
181 int id;
182
Steven Rostedt80decc72010-04-23 10:00:22 -0400183 id = register_ftrace_event(&call->event);
Li Zefan87d9b4e2009-12-08 11:14:20 +0800184 if (!id)
185 return -ENODEV;
Li Zefan87d9b4e2009-12-08 11:14:20 +0800186
187 return 0;
188}
189EXPORT_SYMBOL_GPL(trace_event_raw_init);
190
/*
 * Default ->reg() implementation for events backed by tracepoints.
 * For normal tracing the probe gets the event *file* as its private
 * data; for perf it gets the event *call*.  The perf OPEN/CLOSE/ADD/DEL
 * operations need no work here and just return success.
 */
int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
227
/*
 * Switch cmdline recording on/off for every currently *enabled* event
 * file in every trace array.  Each toggle also sets/clears the file's
 * RECORDED_CMD bit so the matching stop happens when the event is
 * later disabled.
 */
void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		/* Disabled events never started recording; skip them. */
		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}
249
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400250static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
251 int enable, int soft_disable)
Steven Rostedtfd994982009-02-28 02:41:25 -0500252{
Steven Rostedtae63b312012-05-03 23:09:03 -0400253 struct ftrace_event_call *call = file->event_call;
Li Zefan3b8e4272009-12-08 11:14:52 +0800254 int ret = 0;
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400255 int disable;
Li Zefan3b8e4272009-12-08 11:14:52 +0800256
Steven Rostedtfd994982009-02-28 02:41:25 -0500257 switch (enable) {
258 case 0:
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400259 /*
Masami Hiramatsu1cf4c072013-05-09 14:44:29 +0900260 * When soft_disable is set and enable is cleared, the sm_ref
261 * reference counter is decremented. If it reaches 0, we want
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400262 * to clear the SOFT_DISABLED flag but leave the event in the
263 * state that it was. That is, if the event was enabled and
264 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
265 * is set we do not want the event to be enabled before we
266 * clear the bit.
267 *
268 * When soft_disable is not set but the SOFT_MODE flag is,
269 * we do nothing. Do not disable the tracepoint, otherwise
270 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
271 */
272 if (soft_disable) {
Masami Hiramatsu1cf4c072013-05-09 14:44:29 +0900273 if (atomic_dec_return(&file->sm_ref) > 0)
274 break;
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400275 disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
276 clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
277 } else
278 disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);
279
280 if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
281 clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
Steven Rostedtae63b312012-05-03 23:09:03 -0400282 if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
Li Zefane870e9a2010-07-02 11:07:32 +0800283 tracing_stop_cmdline_record();
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400284 clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
Li Zefane870e9a2010-07-02 11:07:32 +0800285 }
Steven Rostedtae63b312012-05-03 23:09:03 -0400286 call->class->reg(call, TRACE_REG_UNREGISTER, file);
Steven Rostedtfd994982009-02-28 02:41:25 -0500287 }
Tom Zanussi3baa5e42013-06-29 00:08:07 -0500288 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400289 if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
290 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
Tom Zanussi3baa5e42013-06-29 00:08:07 -0500291 else
292 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
Steven Rostedtfd994982009-02-28 02:41:25 -0500293 break;
294 case 1:
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400295 /*
296 * When soft_disable is set and enable is set, we want to
297 * register the tracepoint for the event, but leave the event
298 * as is. That means, if the event was already enabled, we do
299 * nothing (but set SOFT_MODE). If the event is disabled, we
300 * set SOFT_DISABLED before enabling the event tracepoint, so
301 * it still seems to be disabled.
302 */
303 if (!soft_disable)
304 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
Masami Hiramatsu1cf4c072013-05-09 14:44:29 +0900305 else {
306 if (atomic_inc_return(&file->sm_ref) > 1)
307 break;
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400308 set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
Masami Hiramatsu1cf4c072013-05-09 14:44:29 +0900309 }
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400310
Steven Rostedtae63b312012-05-03 23:09:03 -0400311 if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400312
313 /* Keep the event disabled, when going to SOFT_MODE. */
314 if (soft_disable)
315 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
316
Li Zefane870e9a2010-07-02 11:07:32 +0800317 if (trace_flags & TRACE_ITER_RECORD_CMD) {
318 tracing_start_cmdline_record();
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400319 set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
Li Zefane870e9a2010-07-02 11:07:32 +0800320 }
Steven Rostedtae63b312012-05-03 23:09:03 -0400321 ret = call->class->reg(call, TRACE_REG_REGISTER, file);
Li Zefan3b8e4272009-12-08 11:14:52 +0800322 if (ret) {
323 tracing_stop_cmdline_record();
324 pr_info("event trace: Could not enable event "
325 "%s\n", call->name);
326 break;
327 }
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400328 set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
Steven Rostedt (Red Hat)575380d2013-03-04 23:05:12 -0500329
330 /* WAS_ENABLED gets set but never cleared. */
331 call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
Steven Rostedtfd994982009-02-28 02:41:25 -0500332 }
Steven Rostedtfd994982009-02-28 02:41:25 -0500333 break;
334 }
Li Zefan3b8e4272009-12-08 11:14:52 +0800335
336 return ret;
Steven Rostedtfd994982009-02-28 02:41:25 -0500337}
338
/* Public entry point; thin wrapper around the core state machine. */
int trace_event_enable_disable(struct ftrace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}
344
/* Hard (non-soft) enable/disable, used by the file ops in this file. */
static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}
350
Steven Rostedtae63b312012-05-03 23:09:03 -0400351static void ftrace_clear_events(struct trace_array *tr)
Zhaolei0e907c92009-05-25 18:13:59 +0800352{
Steven Rostedtae63b312012-05-03 23:09:03 -0400353 struct ftrace_event_file *file;
Zhaolei0e907c92009-05-25 18:13:59 +0800354
355 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -0400356 list_for_each_entry(file, &tr->events, list) {
357 ftrace_event_enable_disable(file, 0);
Zhaolei0e907c92009-05-25 18:13:59 +0800358 }
359 mutex_unlock(&event_mutex);
360}
361
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400362static void __put_system(struct event_subsystem *system)
363{
364 struct event_filter *filter = system->filter;
365
Steven Rostedt6e94a782013-06-27 10:58:31 -0400366 WARN_ON_ONCE(system_refcount(system) == 0);
367 if (system_refcount_dec(system))
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400368 return;
369
Steven Rostedtae63b312012-05-03 23:09:03 -0400370 list_del(&system->list);
371
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400372 if (filter) {
373 kfree(filter->filter_string);
374 kfree(filter);
375 }
Steven Rostedt6e94a782013-06-27 10:58:31 -0400376 if (system->ref_count & SYSTEM_FL_FREE_NAME)
377 kfree(system->name);
Steven Rostedte9dbfae2011-07-05 11:36:06 -0400378 kfree(system);
379}
380
/* Take a reference on @system; it must already be referenced. */
static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}
386
/* Take a reference on @dir and its backing subsystem together. */
static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

/* Drop the paired dir+subsystem references; free @dir on last put. */
static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}
404
/* Locked variant of __put_system_dir() for external callers. */
static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}
411
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200412static void remove_subsystem(struct ftrace_subsystem_dir *dir)
413{
414 if (!dir)
415 return;
416
417 if (!--dir->nr_events) {
418 debugfs_remove_recursive(dir->entry);
419 list_del(&dir->list);
420 __put_system_dir(dir);
421 }
422}
423
/*
 * Tear down one event file: clear i_private on every child dentry so
 * open file handles see "gone" rather than a stale pointer, remove the
 * debugfs directory, unlink the file, and release it.
 */
static void remove_event_file_dir(struct ftrace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);		/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
			if (child->d_inode)	/* probably unneeded */
				child->d_inode->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		debugfs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	kmem_cache_free(file_cachep, file);
}
444
Li Zefan8f31bfe2009-05-08 10:31:42 +0800445/*
446 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
447 */
Steven Rostedt (Red Hat)2a6c24a2013-07-02 14:48:23 -0400448static int
449__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
450 const char *sub, const char *event, int set)
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500451{
Steven Rostedtae63b312012-05-03 23:09:03 -0400452 struct ftrace_event_file *file;
Steven Rostedta59fd602009-04-10 13:52:20 -0400453 struct ftrace_event_call *call;
Steven Rostedt29f93942009-05-08 16:06:47 -0400454 int ret = -EINVAL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500455
Steven Rostedtae63b312012-05-03 23:09:03 -0400456 list_for_each_entry(file, &tr->events, list) {
457
458 call = file->event_call;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500459
Steven Rostedta1d0ce82010-06-08 11:22:06 -0400460 if (!call->name || !call->class || !call->class->reg)
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500461 continue;
Steven Rostedt1473e442009-02-24 14:15:08 -0500462
Steven Rostedt9b637762012-05-10 15:55:43 -0400463 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
464 continue;
465
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500466 if (match &&
467 strcmp(match, call->name) != 0 &&
Steven Rostedt8f082012010-04-20 10:47:33 -0400468 strcmp(match, call->class->system) != 0)
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500469 continue;
470
Steven Rostedt8f082012010-04-20 10:47:33 -0400471 if (sub && strcmp(sub, call->class->system) != 0)
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500472 continue;
473
474 if (event && strcmp(event, call->name) != 0)
Steven Rostedt1473e442009-02-24 14:15:08 -0500475 continue;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500476
Steven Rostedtae63b312012-05-03 23:09:03 -0400477 ftrace_event_enable_disable(file, set);
Steven Rostedtfd994982009-02-28 02:41:25 -0500478
Steven Rostedtb628b3e2009-02-27 23:32:58 -0500479 ret = 0;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500480 }
Steven Rostedt (Red Hat)2a6c24a2013-07-02 14:48:23 -0400481
482 return ret;
483}
484
/* Locked wrapper around __ftrace_set_clr_event_nolock(). */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}
496
Steven Rostedtae63b312012-05-03 23:09:03 -0400497static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
Li Zefan8f31bfe2009-05-08 10:31:42 +0800498{
499 char *event = NULL, *sub = NULL, *match;
500
501 /*
502 * The buf format can be <subsystem>:<event-name>
503 * *:<event-name> means any event by that name.
504 * :<event-name> is the same.
505 *
506 * <subsystem>:* means all events in that subsystem
507 * <subsystem>: means the same.
508 *
509 * <name> (no ':') means all events in a subsystem with
510 * the name <name> or any event that matches <name>
511 */
512
513 match = strsep(&buf, ":");
514 if (buf) {
515 sub = match;
516 event = buf;
517 match = NULL;
518
519 if (!strlen(sub) || strcmp(sub, "*") == 0)
520 sub = NULL;
521 if (!strlen(event) || strcmp(event, "*") == 0)
522 event = NULL;
523 }
524
Steven Rostedtae63b312012-05-03 23:09:03 -0400525 return __ftrace_set_clr_event(tr, match, sub, event, set);
Li Zefan8f31bfe2009-05-08 10:31:42 +0800526}
527
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	/* Kernel-internal callers always act on the top-level trace array. */
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
Steven Rostedt4671c792009-05-08 16:27:41 -0400547
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127

/*
 * Write handler for the "set_event" file: parse one token per write
 * (a leading '!' means disable) and apply it via ftrace_set_clr_event().
 * Returns bytes consumed, or a negative error.
 */
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	/* Make sure the ring buffers are allocated before enabling. */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		/* '!' prefix requests a disable instead of an enable. */
		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		/* "+ !set" skips the '!' character when disabling. */
		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
592
593static void *
594t_next(struct seq_file *m, void *v, loff_t *pos)
595{
Steven Rostedtae63b312012-05-03 23:09:03 -0400596 struct ftrace_event_file *file = v;
597 struct ftrace_event_call *call;
598 struct trace_array *tr = m->private;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500599
600 (*pos)++;
601
Steven Rostedtae63b312012-05-03 23:09:03 -0400602 list_for_each_entry_continue(file, &tr->events, list) {
603 call = file->event_call;
Steven Rostedt40e26812009-03-10 11:32:40 -0400604 /*
605 * The ftrace subsystem is for showing formats only.
606 * They can not be enabled or disabled via the event files.
607 */
Steven Rostedta1d0ce82010-06-08 11:22:06 -0400608 if (call->class && call->class->reg)
Steven Rostedtae63b312012-05-03 23:09:03 -0400609 return file;
Steven Rostedt40e26812009-03-10 11:32:40 -0400610 }
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500611
Li Zefan30bd39c2009-09-18 14:07:05 +0800612 return NULL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500613}
614
615static void *t_start(struct seq_file *m, loff_t *pos)
616{
Steven Rostedtae63b312012-05-03 23:09:03 -0400617 struct ftrace_event_file *file;
618 struct trace_array *tr = m->private;
Li Zefane1c7e2a2009-06-24 09:52:29 +0800619 loff_t l;
620
Li Zefan20c89282009-05-06 10:33:45 +0800621 mutex_lock(&event_mutex);
Li Zefane1c7e2a2009-06-24 09:52:29 +0800622
Steven Rostedtae63b312012-05-03 23:09:03 -0400623 file = list_entry(&tr->events, struct ftrace_event_file, list);
Li Zefane1c7e2a2009-06-24 09:52:29 +0800624 for (l = 0; l <= *pos; ) {
Steven Rostedtae63b312012-05-03 23:09:03 -0400625 file = t_next(m, file, &l);
626 if (!file)
Li Zefane1c7e2a2009-06-24 09:52:29 +0800627 break;
628 }
Steven Rostedtae63b312012-05-03 23:09:03 -0400629 return file;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500630}
631
632static void *
633s_next(struct seq_file *m, void *v, loff_t *pos)
634{
Steven Rostedtae63b312012-05-03 23:09:03 -0400635 struct ftrace_event_file *file = v;
636 struct trace_array *tr = m->private;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500637
638 (*pos)++;
639
Steven Rostedtae63b312012-05-03 23:09:03 -0400640 list_for_each_entry_continue(file, &tr->events, list) {
641 if (file->flags & FTRACE_EVENT_FL_ENABLED)
642 return file;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500643 }
644
Li Zefan30bd39c2009-09-18 14:07:05 +0800645 return NULL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500646}
647
648static void *s_start(struct seq_file *m, loff_t *pos)
649{
Steven Rostedtae63b312012-05-03 23:09:03 -0400650 struct ftrace_event_file *file;
651 struct trace_array *tr = m->private;
Li Zefane1c7e2a2009-06-24 09:52:29 +0800652 loff_t l;
653
Li Zefan20c89282009-05-06 10:33:45 +0800654 mutex_lock(&event_mutex);
Li Zefane1c7e2a2009-06-24 09:52:29 +0800655
Steven Rostedtae63b312012-05-03 23:09:03 -0400656 file = list_entry(&tr->events, struct ftrace_event_file, list);
Li Zefane1c7e2a2009-06-24 09:52:29 +0800657 for (l = 0; l <= *pos; ) {
Steven Rostedtae63b312012-05-03 23:09:03 -0400658 file = s_next(m, file, &l);
659 if (!file)
Li Zefane1c7e2a2009-06-24 09:52:29 +0800660 break;
661 }
Steven Rostedtae63b312012-05-03 23:09:03 -0400662 return file;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500663}
664
665static int t_show(struct seq_file *m, void *v)
666{
Steven Rostedtae63b312012-05-03 23:09:03 -0400667 struct ftrace_event_file *file = v;
668 struct ftrace_event_call *call = file->event_call;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500669
Steven Rostedt8f082012010-04-20 10:47:33 -0400670 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
671 seq_printf(m, "%s:", call->class->system);
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500672 seq_printf(m, "%s\n", call->name);
673
674 return 0;
675}
676
/* seq_file stop: release the mutex taken in t_start()/s_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
681
Steven Rostedt1473e442009-02-24 14:15:08 -0500682static ssize_t
683event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
684 loff_t *ppos)
685{
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200686 struct ftrace_event_file *file;
687 unsigned long flags;
Tom Zanussia4390592013-06-29 00:08:04 -0500688 char buf[4] = "0";
Steven Rostedt1473e442009-02-24 14:15:08 -0500689
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200690 mutex_lock(&event_mutex);
691 file = event_file_data(filp);
692 if (likely(file))
693 flags = file->flags;
694 mutex_unlock(&event_mutex);
695
696 if (!file)
697 return -ENODEV;
698
699 if (flags & FTRACE_EVENT_FL_ENABLED &&
700 !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
Tom Zanussia4390592013-06-29 00:08:04 -0500701 strcpy(buf, "1");
702
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200703 if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
704 flags & FTRACE_EVENT_FL_SOFT_MODE)
Tom Zanussia4390592013-06-29 00:08:04 -0500705 strcat(buf, "*");
706
707 strcat(buf, "\n");
Steven Rostedt1473e442009-02-24 14:15:08 -0500708
Steven Rostedt (Red Hat)417944c2013-03-12 13:26:18 -0400709 return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
Steven Rostedt1473e442009-02-24 14:15:08 -0500710}
711
712static ssize_t
713event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
714 loff_t *ppos)
715{
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200716 struct ftrace_event_file *file;
Steven Rostedt1473e442009-02-24 14:15:08 -0500717 unsigned long val;
718 int ret;
719
Peter Huewe22fe9b52011-06-07 21:58:27 +0200720 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
721 if (ret)
Steven Rostedt1473e442009-02-24 14:15:08 -0500722 return ret;
723
Steven Rostedt1852fcc2009-03-11 14:33:00 -0400724 ret = tracing_update_buffers();
725 if (ret < 0)
726 return ret;
727
Steven Rostedt1473e442009-02-24 14:15:08 -0500728 switch (val) {
729 case 0:
Steven Rostedt1473e442009-02-24 14:15:08 -0500730 case 1:
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200731 ret = -ENODEV;
Steven Rostedt11a241a2009-03-02 11:49:04 -0500732 mutex_lock(&event_mutex);
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200733 file = event_file_data(filp);
734 if (likely(file))
735 ret = ftrace_event_enable_disable(file, val);
Steven Rostedt11a241a2009-03-02 11:49:04 -0500736 mutex_unlock(&event_mutex);
Steven Rostedt1473e442009-02-24 14:15:08 -0500737 break;
738
739 default:
740 return -EINVAL;
741 }
742
743 *ppos += cnt;
744
Li Zefan3b8e4272009-12-08 11:14:52 +0800745 return ret ? ret : cnt;
Steven Rostedt1473e442009-02-24 14:15:08 -0500746}
747
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400748static ssize_t
749system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
750 loff_t *ppos)
751{
Li Zefanc142b152009-05-08 10:32:05 +0800752 const char set_to_char[4] = { '?', '0', '1', 'X' };
Steven Rostedtae63b312012-05-03 23:09:03 -0400753 struct ftrace_subsystem_dir *dir = filp->private_data;
754 struct event_subsystem *system = dir->subsystem;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400755 struct ftrace_event_call *call;
Steven Rostedtae63b312012-05-03 23:09:03 -0400756 struct ftrace_event_file *file;
757 struct trace_array *tr = dir->tr;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400758 char buf[2];
Li Zefanc142b152009-05-08 10:32:05 +0800759 int set = 0;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400760 int ret;
761
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400762 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -0400763 list_for_each_entry(file, &tr->events, list) {
764 call = file->event_call;
Steven Rostedta1d0ce82010-06-08 11:22:06 -0400765 if (!call->name || !call->class || !call->class->reg)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400766 continue;
767
Steven Rostedt40ee4df2011-07-05 14:32:51 -0400768 if (system && strcmp(call->class->system, system->name) != 0)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400769 continue;
770
771 /*
772 * We need to find out if all the events are set
773 * or if all events or cleared, or if we have
774 * a mixture.
775 */
Steven Rostedtae63b312012-05-03 23:09:03 -0400776 set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
Li Zefanc142b152009-05-08 10:32:05 +0800777
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400778 /*
779 * If we have a mixture, no need to look further.
780 */
Li Zefanc142b152009-05-08 10:32:05 +0800781 if (set == 3)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400782 break;
783 }
784 mutex_unlock(&event_mutex);
785
Li Zefanc142b152009-05-08 10:32:05 +0800786 buf[0] = set_to_char[set];
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400787 buf[1] = '\n';
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400788
789 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
790
791 return ret;
792}
793
794static ssize_t
795system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
796 loff_t *ppos)
797{
Steven Rostedtae63b312012-05-03 23:09:03 -0400798 struct ftrace_subsystem_dir *dir = filp->private_data;
799 struct event_subsystem *system = dir->subsystem;
Steven Rostedt40ee4df2011-07-05 14:32:51 -0400800 const char *name = NULL;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400801 unsigned long val;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400802 ssize_t ret;
803
Peter Huewe22fe9b52011-06-07 21:58:27 +0200804 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
805 if (ret)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400806 return ret;
807
808 ret = tracing_update_buffers();
809 if (ret < 0)
810 return ret;
811
Li Zefan8f31bfe2009-05-08 10:31:42 +0800812 if (val != 0 && val != 1)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400813 return -EINVAL;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400814
Steven Rostedt40ee4df2011-07-05 14:32:51 -0400815 /*
816 * Opening of "enable" adds a ref count to system,
817 * so the name is safe to use.
818 */
819 if (system)
820 name = system->name;
821
Steven Rostedtae63b312012-05-03 23:09:03 -0400822 ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400823 if (ret)
Li Zefan8f31bfe2009-05-08 10:31:42 +0800824 goto out;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400825
826 ret = cnt;
827
Li Zefan8f31bfe2009-05-08 10:31:42 +0800828out:
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400829 *ppos += cnt;
830
831 return ret;
832}
833
/*
 * Pseudo iterator positions used by the "format" seq_file in addition
 * to real field-list nodes: the header block, the blank line between
 * common and event-specific fields, and the trailing print fmt line.
 * (FIELD_SEPERATOR is a historic misspelling, kept as-is.)
 */
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};
839
/*
 * seq_file ->next for an event "format" file.
 *
 * The cursor is either one of the FORMAT_* pseudo positions or a node
 * in one of the two field lists (common fields first, then the event's
 * own fields).  The lists are walked via ->prev because fields are
 * added at the list head, so the reverse direction yields them in
 * definition order.
 */
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		/* Header done: begin walking the common field list. */
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		/* Separator done: begin walking the event's own fields. */
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	/* Step backwards; hitting a list head means that list is done. */
	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}
871
/*
 * seq_file ->show for an event "format" file: prints the name/ID
 * header, each field description (rendering fixed-size array types as
 * TYPE VAR[LEN]), and finally the print fmt line.
 */
static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	/* Not a pseudo position: v is a node in a field list. */
	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type(except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	/* __data_loc types are dynamic; do not render them as arrays. */
	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}
921
/*
 * seq_file ->start for "format": takes event_mutex so the event and
 * its field lists cannot be torn down while being printed, then
 * re-walks from the header to the requested position.
 */
static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);	/* event was removed */

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}
937
/* seq_file ->stop for "format": drops the lock taken by f_start(). */
static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
942
/* seq_file iterator for the per-event "format" file. */
static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};
949
950static int trace_format_open(struct inode *inode, struct file *file)
951{
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400952 struct seq_file *m;
953 int ret;
954
955 ret = seq_open(file, &trace_format_seq_ops);
956 if (ret < 0)
957 return ret;
958
959 m = file->private_data;
Oleg Nesterovc5a44a12013-07-26 19:25:43 +0200960 m->private = file;
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400961
962 return 0;
Steven Rostedt981d0812009-03-02 13:53:59 -0500963}
964
Peter Zijlstra23725ae2009-03-19 20:26:13 +0100965static ssize_t
966event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
967{
Oleg Nesterov1a111262013-07-26 19:25:32 +0200968 int id = (long)event_file_data(filp);
Oleg Nesterovcd458ba2013-07-18 20:47:12 +0200969 char buf[32];
970 int len;
Peter Zijlstra23725ae2009-03-19 20:26:13 +0100971
972 if (*ppos)
973 return 0;
974
Oleg Nesterov1a111262013-07-26 19:25:32 +0200975 if (unlikely(!id))
976 return -ENODEV;
977
978 len = sprintf(buf, "%d\n", id);
979
Oleg Nesterovcd458ba2013-07-18 20:47:12 +0200980 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
Peter Zijlstra23725ae2009-03-19 20:26:13 +0100981}
982
Tom Zanussi7ce7e422009-03-22 03:31:04 -0500983static ssize_t
984event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
985 loff_t *ppos)
986{
Tom Zanussif306cc82013-10-24 08:34:17 -0500987 struct ftrace_event_file *file;
Tom Zanussi7ce7e422009-03-22 03:31:04 -0500988 struct trace_seq *s;
Oleg Nesterove2912b02013-07-26 19:25:40 +0200989 int r = -ENODEV;
Tom Zanussi7ce7e422009-03-22 03:31:04 -0500990
991 if (*ppos)
992 return 0;
993
994 s = kmalloc(sizeof(*s), GFP_KERNEL);
Oleg Nesterove2912b02013-07-26 19:25:40 +0200995
Tom Zanussi7ce7e422009-03-22 03:31:04 -0500996 if (!s)
997 return -ENOMEM;
998
999 trace_seq_init(s);
1000
Oleg Nesterove2912b02013-07-26 19:25:40 +02001001 mutex_lock(&event_mutex);
Tom Zanussif306cc82013-10-24 08:34:17 -05001002 file = event_file_data(filp);
1003 if (file)
1004 print_event_filter(file, s);
Oleg Nesterove2912b02013-07-26 19:25:40 +02001005 mutex_unlock(&event_mutex);
1006
Tom Zanussif306cc82013-10-24 08:34:17 -05001007 if (file)
Oleg Nesterove2912b02013-07-26 19:25:40 +02001008 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001009
1010 kfree(s);
1011
1012 return r;
1013}
1014
1015static ssize_t
1016event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1017 loff_t *ppos)
1018{
Tom Zanussif306cc82013-10-24 08:34:17 -05001019 struct ftrace_event_file *file;
Tom Zanussi8b372562009-04-28 03:04:59 -05001020 char *buf;
Oleg Nesterove2912b02013-07-26 19:25:40 +02001021 int err = -ENODEV;
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001022
Tom Zanussi8b372562009-04-28 03:04:59 -05001023 if (cnt >= PAGE_SIZE)
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001024 return -EINVAL;
1025
Tom Zanussi8b372562009-04-28 03:04:59 -05001026 buf = (char *)__get_free_page(GFP_TEMPORARY);
1027 if (!buf)
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001028 return -ENOMEM;
1029
Tom Zanussi8b372562009-04-28 03:04:59 -05001030 if (copy_from_user(buf, ubuf, cnt)) {
1031 free_page((unsigned long) buf);
1032 return -EFAULT;
1033 }
1034 buf[cnt] = '\0';
1035
Oleg Nesterove2912b02013-07-26 19:25:40 +02001036 mutex_lock(&event_mutex);
Tom Zanussif306cc82013-10-24 08:34:17 -05001037 file = event_file_data(filp);
1038 if (file)
1039 err = apply_event_filter(file, buf);
Oleg Nesterove2912b02013-07-26 19:25:40 +02001040 mutex_unlock(&event_mutex);
1041
Tom Zanussi8b372562009-04-28 03:04:59 -05001042 free_page((unsigned long) buf);
1043 if (err < 0)
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001044 return err;
Tom Zanussi0a19e532009-04-13 03:17:50 -05001045
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001046 *ppos += cnt;
1047
1048 return cnt;
1049}
1050
/* All registered event subsystems, shared by every trace_array. */
static LIST_HEAD(event_subsystems);
1052
/*
 * Open handler for a subsystem's "filter"/"enable" file.
 *
 * inode->i_private is a ftrace_subsystem_dir, but it may have been
 * removed; validate it by searching every trace_array's system list
 * under trace_types_lock and event_mutex.  On success the dir (and
 * hence the subsystem) and the trace_array are both reference-counted
 * for the lifetime of the open file; subsystem_release() drops them.
 */
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		/* Undo both references taken above. */
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}
1102
/*
 * Open handler for a trace_array's top-level "enable" file.  Acts like
 * a subsystem file that covers every subsystem: a temporary
 * ftrace_subsystem_dir with a NULL ->subsystem is allocated to carry
 * the trace_array pointer; subsystem_release() detects the NULL and
 * kfree()s it.
 */
static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		/* Roll back the reference and the temporary dir. */
		trace_array_put(tr);
		kfree(dir);
		return ret;
	}

	filp->private_data = dir;

	return 0;
}
1135
1136static int subsystem_release(struct inode *inode, struct file *file)
1137{
Steven Rostedtae63b312012-05-03 23:09:03 -04001138 struct ftrace_subsystem_dir *dir = file->private_data;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001139
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001140 trace_array_put(dir->tr);
1141
Steven Rostedtae63b312012-05-03 23:09:03 -04001142 /*
1143 * If dir->subsystem is NULL, then this is a temporary
1144 * descriptor that was made for a trace_array to enable
1145 * all subsystems.
1146 */
1147 if (dir->subsystem)
1148 put_system(dir);
1149 else
1150 kfree(dir);
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001151
1152 return 0;
1153}
1154
Tom Zanussicfb180f2009-03-22 03:31:17 -05001155static ssize_t
1156subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
1157 loff_t *ppos)
1158{
Steven Rostedtae63b312012-05-03 23:09:03 -04001159 struct ftrace_subsystem_dir *dir = filp->private_data;
1160 struct event_subsystem *system = dir->subsystem;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001161 struct trace_seq *s;
1162 int r;
1163
1164 if (*ppos)
1165 return 0;
1166
1167 s = kmalloc(sizeof(*s), GFP_KERNEL);
1168 if (!s)
1169 return -ENOMEM;
1170
1171 trace_seq_init(s);
1172
Tom Zanussi8b372562009-04-28 03:04:59 -05001173 print_subsystem_event_filter(system, s);
Tom Zanussi4bda2d52009-03-24 02:14:31 -05001174 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
Tom Zanussicfb180f2009-03-22 03:31:17 -05001175
1176 kfree(s);
1177
1178 return r;
1179}
1180
1181static ssize_t
1182subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
1183 loff_t *ppos)
1184{
Steven Rostedtae63b312012-05-03 23:09:03 -04001185 struct ftrace_subsystem_dir *dir = filp->private_data;
Tom Zanussi8b372562009-04-28 03:04:59 -05001186 char *buf;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001187 int err;
1188
Tom Zanussi8b372562009-04-28 03:04:59 -05001189 if (cnt >= PAGE_SIZE)
Tom Zanussicfb180f2009-03-22 03:31:17 -05001190 return -EINVAL;
1191
Tom Zanussi8b372562009-04-28 03:04:59 -05001192 buf = (char *)__get_free_page(GFP_TEMPORARY);
1193 if (!buf)
Tom Zanussicfb180f2009-03-22 03:31:17 -05001194 return -ENOMEM;
1195
Tom Zanussi8b372562009-04-28 03:04:59 -05001196 if (copy_from_user(buf, ubuf, cnt)) {
1197 free_page((unsigned long) buf);
1198 return -EFAULT;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001199 }
Tom Zanussi8b372562009-04-28 03:04:59 -05001200 buf[cnt] = '\0';
Tom Zanussicfb180f2009-03-22 03:31:17 -05001201
Steven Rostedtae63b312012-05-03 23:09:03 -04001202 err = apply_subsystem_event_filter(dir, buf);
Tom Zanussi8b372562009-04-28 03:04:59 -05001203 free_page((unsigned long) buf);
1204 if (err < 0)
Li Zefan44e9c8b2009-04-11 15:55:28 +08001205 return err;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001206
1207 *ppos += cnt;
1208
1209 return cnt;
1210}
1211
Steven Rostedtd1b182a2009-04-15 16:53:47 -04001212static ssize_t
1213show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
1214{
1215 int (*func)(struct trace_seq *s) = filp->private_data;
1216 struct trace_seq *s;
1217 int r;
1218
1219 if (*ppos)
1220 return 0;
1221
1222 s = kmalloc(sizeof(*s), GFP_KERNEL);
1223 if (!s)
1224 return -ENOMEM;
1225
1226 trace_seq_init(s);
1227
1228 func(s);
1229 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
1230
1231 kfree(s);
1232
1233 return r;
1234}
1235
/* Forward declarations for open/release helpers defined below. */
static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

/* Iterator over all registered events ("available_events"). */
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

/* Iterator over the currently enabled events ("set_event"). */
static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};
1253
/* "available_events": read-only list of every registered event. */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* "set_event": read or write the set of enabled events. */
static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = ftrace_event_release,
};

/* Per-event "enable" file. */
static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

/* Per-event "format" file (seq_file based). */
static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Per-event "id" file (perf); private data holds the id itself. */
static const struct file_operations ftrace_event_id_fops = {
	.read = event_id_read,
	.llseek = default_llseek,
};

/* Per-event "filter" file. */
static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

/* Per-subsystem "filter" file; open/release manage the refcounts. */
static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* Per-subsystem "enable" file. */
static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* Top-level "enable" file of a trace_array (all subsystems). */
static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

/* "header_page"/"header_event" files; private data is a printer. */
static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};
1324
Steven Rostedtae63b312012-05-03 23:09:03 -04001325static int
1326ftrace_event_open(struct inode *inode, struct file *file,
1327 const struct seq_operations *seq_ops)
Steven Rostedt1473e442009-02-24 14:15:08 -05001328{
Steven Rostedtae63b312012-05-03 23:09:03 -04001329 struct seq_file *m;
1330 int ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001331
Steven Rostedtae63b312012-05-03 23:09:03 -04001332 ret = seq_open(file, seq_ops);
1333 if (ret < 0)
1334 return ret;
1335 m = file->private_data;
1336 /* copy tr over to seq ops */
1337 m->private = inode->i_private;
Steven Rostedt1473e442009-02-24 14:15:08 -05001338
Steven Rostedtae63b312012-05-03 23:09:03 -04001339 return ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001340}
1341
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001342static int ftrace_event_release(struct inode *inode, struct file *file)
1343{
1344 struct trace_array *tr = inode->i_private;
1345
1346 trace_array_put(tr);
1347
1348 return seq_release(inode, file);
1349}
1350
Steven Rostedt15075ca2012-05-03 14:57:28 -04001351static int
1352ftrace_event_avail_open(struct inode *inode, struct file *file)
1353{
1354 const struct seq_operations *seq_ops = &show_event_seq_ops;
1355
Steven Rostedtae63b312012-05-03 23:09:03 -04001356 return ftrace_event_open(inode, file, seq_ops);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001357}
1358
1359static int
1360ftrace_event_set_open(struct inode *inode, struct file *file)
1361{
1362 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
Steven Rostedtae63b312012-05-03 23:09:03 -04001363 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001364 int ret;
1365
1366 if (trace_array_get(tr) < 0)
1367 return -ENODEV;
Steven Rostedt15075ca2012-05-03 14:57:28 -04001368
1369 if ((file->f_mode & FMODE_WRITE) &&
1370 (file->f_flags & O_TRUNC))
Steven Rostedtae63b312012-05-03 23:09:03 -04001371 ftrace_clear_events(tr);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001372
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001373 ret = ftrace_event_open(inode, file, seq_ops);
1374 if (ret < 0)
1375 trace_array_put(tr);
1376 return ret;
Steven Rostedt15075ca2012-05-03 14:57:28 -04001377}
1378
Steven Rostedtae63b312012-05-03 23:09:03 -04001379static struct event_subsystem *
1380create_new_subsystem(const char *name)
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001381{
1382 struct event_subsystem *system;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001383
1384 /* need to create new entry */
1385 system = kmalloc(sizeof(*system), GFP_KERNEL);
Steven Rostedtae63b312012-05-03 23:09:03 -04001386 if (!system)
1387 return NULL;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001388
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001389 system->ref_count = 1;
Steven Rostedt6e94a782013-06-27 10:58:31 -04001390
1391 /* Only allocate if dynamic (kprobes and modules) */
1392 if (!core_kernel_data((unsigned long)name)) {
1393 system->ref_count |= SYSTEM_FL_FREE_NAME;
1394 system->name = kstrdup(name, GFP_KERNEL);
1395 if (!system->name)
1396 goto out_free;
1397 } else
1398 system->name = name;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001399
Tom Zanussi30e673b2009-04-28 03:04:47 -05001400 system->filter = NULL;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001401
Tom Zanussi8b372562009-04-28 03:04:59 -05001402 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
Steven Rostedtae63b312012-05-03 23:09:03 -04001403 if (!system->filter)
1404 goto out_free;
1405
1406 list_add(&system->list, &event_subsystems);
1407
1408 return system;
1409
1410 out_free:
Steven Rostedt6e94a782013-06-27 10:58:31 -04001411 if (system->ref_count & SYSTEM_FL_FREE_NAME)
1412 kfree(system->name);
Steven Rostedtae63b312012-05-03 23:09:03 -04001413 kfree(system);
1414 return NULL;
1415}
1416
/*
 * Find or create the debugfs directory for an event's subsystem
 * within a trace_array, wiring file->system to the resulting
 * ftrace_subsystem_dir.
 *
 * Returns the subsystem's dentry, or NULL on failure.  The subsystem
 * object itself is shared across trace_arrays and reference counted;
 * the per-array dir tracks how many events use it via nr_events.
 */
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	/*
	 * A failed "filter" file is not fatal: the filter is freed and
	 * the subsystem still works without one.
	 */
	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:

	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}
1492
/*
 * Create the debugfs directory and control files for a single event:
 * "enable", "id" (perf), "filter", "trigger" and "format", placed
 * under the event's subsystem directory (created on demand).
 * Returns 0 on success, a negative value on failure.
 */
static int
event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	/* Events marked IGNORE_ENABLE get no user-visible "enable". */
	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  &ftrace_enable_fops);

#ifdef CONFIG_PERF_EVENTS
	/* The event id is stored directly as the file's private data. */
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir,
				  (void *)(long)call->event.type,
				  &ftrace_event_id_fops);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, file,
			  &ftrace_event_filter_fops);

	trace_create_file("trigger", 0644, file->dir, file,
			  &event_trigger_fops);

	trace_create_file("format", 0444, file->dir, call,
			  &ftrace_event_format_fops);

	return 0;
}
1555
/*
 * Remove the per-instance directory/file state for @call from every
 * trace_array it appears in. A call has at most one file per trace_array,
 * so the inner loop can stop at the first match.
 */
static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	/* _safe variant: remove_event_file_dir() unlinks @file from the list */
	do_for_each_event_file_safe(tr, file) {
		if (file->event_call != call)
			continue;

		remove_event_file_dir(file);
		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}
1575
/*
 * Fully tear down @call: disable it in every trace_array, drop its
 * per-file filter state, unregister its output format handler, remove
 * its per-instance files and unlink it from the global event list.
 * Called from __trace_remove_event_call() with event_mutex held.
 */
static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/* Make sure the event is off before it disappears */
		ftrace_event_enable_disable(file, 0);
		destroy_preds(file);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}
1600
1601static int event_init(struct ftrace_event_call *call)
1602{
1603 int ret = 0;
1604
1605 if (WARN_ON(!call->name))
1606 return -EINVAL;
1607
1608 if (call->class->raw_init) {
1609 ret = call->class->raw_init(call);
1610 if (ret < 0 && ret != -ENOSYS)
1611 pr_warn("Could not initialize trace events/%s\n",
1612 call->name);
1613 }
1614
1615 return ret;
1616}
1617
Li Zefan67ead0a2010-05-24 16:25:13 +08001618static int
Steven Rostedtae63b312012-05-03 23:09:03 -04001619__register_event(struct ftrace_event_call *call, struct module *mod)
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001620{
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001621 int ret;
Steven Rostedt6d723732009-04-10 14:53:50 -04001622
Ezequiel Garcia87819152012-09-12 11:47:57 -03001623 ret = event_init(call);
1624 if (ret < 0)
1625 return ret;
Steven Rostedt701970b2009-04-24 23:11:22 -04001626
Steven Rostedtae63b312012-05-03 23:09:03 -04001627 list_add(&call->list, &ftrace_events);
Li Zefan67ead0a2010-05-24 16:25:13 +08001628 call->mod = mod;
Masami Hiramatsu88f70d72009-09-25 11:20:54 -07001629
Steven Rostedtae63b312012-05-03 23:09:03 -04001630 return 0;
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001631}
1632
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04001633static struct ftrace_event_file *
1634trace_create_new_event(struct ftrace_event_call *call,
1635 struct trace_array *tr)
1636{
1637 struct ftrace_event_file *file;
1638
1639 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1640 if (!file)
1641 return NULL;
1642
1643 file->event_call = call;
1644 file->tr = tr;
1645 atomic_set(&file->sm_ref, 0);
Tom Zanussi85f2b082013-10-24 08:59:24 -05001646 atomic_set(&file->tm_ref, 0);
1647 INIT_LIST_HEAD(&file->triggers);
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04001648 list_add(&file->list, &tr->events);
1649
1650 return file;
1651}
1652
Steven Rostedtae63b312012-05-03 23:09:03 -04001653/* Add an event to a trace directory */
1654static int
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001655__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
Steven Rostedtae63b312012-05-03 23:09:03 -04001656{
1657 struct ftrace_event_file *file;
1658
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04001659 file = trace_create_new_event(call, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04001660 if (!file)
1661 return -ENOMEM;
1662
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001663 return event_create_dir(tr->event_dir, file);
Steven Rostedtae63b312012-05-03 23:09:03 -04001664}
1665
Steven Rostedt77248222013-02-27 16:28:06 -05001666/*
1667 * Just create a decriptor for early init. A descriptor is required
1668 * for enabling events at boot. We want to enable events before
1669 * the filesystem is initialized.
1670 */
1671static __init int
1672__trace_early_add_new_event(struct ftrace_event_call *call,
1673 struct trace_array *tr)
1674{
1675 struct ftrace_event_file *file;
1676
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04001677 file = trace_create_new_event(call, tr);
Steven Rostedt77248222013-02-27 16:28:06 -05001678 if (!file)
1679 return -ENOMEM;
1680
Steven Rostedt77248222013-02-27 16:28:06 -05001681 return 0;
1682}
1683
Steven Rostedtae63b312012-05-03 23:09:03 -04001684struct ftrace_module_file_ops;
Oleg Nesterov779c5e32013-07-31 19:31:32 +02001685static void __add_event_to_tracers(struct ftrace_event_call *call);
Steven Rostedtae63b312012-05-03 23:09:03 -04001686
/*
 * Add an additional event_call dynamically (e.g. from kprobe events).
 * Registers the call globally and creates its files in every existing
 * trace array. Returns 0 on success or a negative error.
 */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	/* Lock ordering: trace_types_lock before event_mutex */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call);

	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt701970b2009-04-24 23:11:22 -04001702
/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);		/* disable, unregister and unlink */
	trace_destroy_fields(call);	/* free the field definitions */
	destroy_call_preds(call);	/* free the call-level filter preds */
}
1713
/*
 * Verify that @call is safe to remove — no perf users and not enabled
 * in any trace_array — then tear it down. Returns -EBUSY if the event
 * is still in use. Same locking requirements as
 * __trace_remove_event_call().
 */
static int probe_remove_event_call(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	/* perf still holds a reference on this event */
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on ftrace_event_enable_disable(enable => 0)
		 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
		 * TRACE_REG_UNREGISTER.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return -EBUSY;
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}
1746
/*
 * Remove an event_call. Returns 0 on success or -EBUSY if the event is
 * still in use (enabled somewhere, or referenced by perf).
 */
int trace_remove_event_call(struct ftrace_event_call *call)
{
	int ret;

	/* Lock ordering: trace_types_lock -> event_mutex -> trace_event_sem */
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return ret;
}
1762
/* Iterate over an array of ftrace_event_call pointers [start, end). */
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
1767
1768#ifdef CONFIG_MODULES
1769
Steven Rostedt6d723732009-04-10 14:53:50 -04001770static void trace_module_add_events(struct module *mod)
1771{
Steven Rostedte4a9ea52011-01-27 09:15:30 -05001772 struct ftrace_event_call **call, **start, **end;
Steven Rostedt6d723732009-04-10 14:53:50 -04001773
1774 start = mod->trace_events;
1775 end = mod->trace_events + mod->num_trace_events;
1776
Steven Rostedt6d723732009-04-10 14:53:50 -04001777 for_each_event(call, start, end) {
Steven Rostedtae63b312012-05-03 23:09:03 -04001778 __register_event(*call, mod);
Oleg Nesterov779c5e32013-07-31 19:31:32 +02001779 __add_event_to_tracers(*call);
Steven Rostedt6d723732009-04-10 14:53:50 -04001780 }
1781}
1782
/*
 * Unregister every trace event owned by module @mod. If any of its
 * events were ever enabled, all online ring buffers are reset so that
 * stale entries cannot be decoded with a new module's event ids.
 */
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}
1809
Steven Rostedt61f919a2009-04-14 18:22:32 -04001810static int trace_module_notify(struct notifier_block *self,
1811 unsigned long val, void *data)
Steven Rostedt6d723732009-04-10 14:53:50 -04001812{
1813 struct module *mod = data;
1814
Alexander Z Lama8227412013-07-01 19:37:54 -07001815 mutex_lock(&trace_types_lock);
Steven Rostedt6d723732009-04-10 14:53:50 -04001816 mutex_lock(&event_mutex);
1817 switch (val) {
1818 case MODULE_STATE_COMING:
1819 trace_module_add_events(mod);
1820 break;
1821 case MODULE_STATE_GOING:
1822 trace_module_remove_events(mod);
1823 break;
1824 }
1825 mutex_unlock(&event_mutex);
Alexander Z Lama8227412013-07-01 19:37:54 -07001826 mutex_unlock(&trace_types_lock);
Steven Rostedt6d723732009-04-10 14:53:50 -04001827
1828 return 0;
1829}
Steven Rostedt (Red Hat)315326c2013-03-02 17:37:14 -05001830
/* Registered in event_trace_init() to track module load/unload. */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
Steven Rostedt61f919a2009-04-14 18:22:32 -04001835#endif /* CONFIG_MODULES */
Steven Rostedt6d723732009-04-10 14:53:50 -04001836
/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	/* Called with event_mutex and trace_event_sem held (write) */
	list_for_each_entry(call, &ftrace_events, list) {
		ret = __trace_add_new_event(call, tr);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   call->name);
	}
}
1851
Tom Zanussi7862ad12013-10-24 08:59:28 -05001852struct ftrace_event_file *
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04001853find_event_file(struct trace_array *tr, const char *system, const char *event)
1854{
1855 struct ftrace_event_file *file;
1856 struct ftrace_event_call *call;
1857
1858 list_for_each_entry(file, &tr->events, list) {
1859
1860 call = file->event_call;
1861
1862 if (!call->name || !call->class || !call->class->reg)
1863 continue;
1864
1865 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1866 continue;
1867
1868 if (strcmp(event, call->name) == 0 &&
1869 strcmp(system, call->class->system) == 0)
1870 return file;
1871 }
1872 return NULL;
1873}
1874
Steven Rostedt (Red Hat)2875a082013-12-20 23:23:05 -05001875#ifdef CONFIG_DYNAMIC_FTRACE
1876
/* Avoid typos */
#define ENABLE_EVENT_STR "enable_event"
#define DISABLE_EVENT_STR "disable_event"

/* Shared state for one enable/disable_event function probe registration. */
struct event_probe_data {
	struct ftrace_event_file *file;	/* event to soft enable/disable */
	unsigned long count;		/* remaining trigger budget; -1 = unlimited */
	int ref;			/* probes sharing this data (see init/free) */
	bool enable;			/* true = enable the event, false = disable */
};
1887
/*
 * Function probe callback: every time the traced function is hit,
 * soft enable or soft disable the target event by toggling its
 * SOFT_DISABLED bit.
 */
static void
event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	if (data->enable)
		clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
	else
		set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
}
1902
/*
 * Counted variant of event_enable_probe(): only toggles the event a
 * limited number of times. A count of -1 means unlimited; otherwise the
 * count is decremented once per state-changing hit until it hits zero.
 */
static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	/* Budget exhausted: stop acting */
	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_probe(ip, parent_ip, _data);
}
1924
1925static int
1926event_enable_print(struct seq_file *m, unsigned long ip,
1927 struct ftrace_probe_ops *ops, void *_data)
1928{
1929 struct event_probe_data *data = _data;
1930
1931 seq_printf(m, "%ps:", (void *)ip);
1932
1933 seq_printf(m, "%s:%s:%s",
1934 data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
1935 data->file->event_call->class->system,
1936 data->file->event_call->name);
1937
1938 if (data->count == -1)
1939 seq_printf(m, ":unlimited\n");
1940 else
1941 seq_printf(m, ":count=%ld\n", data->count);
1942
1943 return 0;
1944}
1945
/*
 * Probe init callback: invoked for each function the probe attaches
 * to; takes a reference on the shared event_probe_data.
 */
static int
event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	data->ref++;
	return 0;
}
1956
/*
 * Probe free callback: drop one reference on the shared data. On the
 * last reference, clear the event's SOFT_MODE, release the module that
 * event_enable_func() pinned, and free the data.
 */
static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
	*pdata = NULL;
}
1976
/* "enable_event" probe, no count limit */
static struct ftrace_probe_ops event_enable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

/* "enable_event" probe with a :count= limit */
static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

/* "disable_event" probe, no count limit (direction comes from data->enable) */
static struct ftrace_probe_ops event_disable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

/* "disable_event" probe with a :count= limit */
static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};
2004
/*
 * Handler for the "enable_event"/"disable_event" commands written to
 * set_ftrace_filter. @param has the form "<system>:<event>[:<count>]";
 * a leading '!' in @glob unregisters a previously set probe.
 * Returns 0 on success or a negative error code.
 */
static int
event_enable_func(struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled || !param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	/* a remaining ":<count>" in @param selects the counted probe ops */
	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		ret = 0;
		goto out;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;	/* default: unlimited */
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		/* try_module_get() returns 0 on failure */
		ret = -EBUSY;
		goto out_free;
	}

	/* Put the event into SOFT_MODE so the probe can toggle it */
	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}
2111
/* "enable_event" command usable in set_ftrace_filter */
static struct ftrace_func_command event_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.func			= event_enable_func,
};

/* "disable_event" command; same handler, direction decided by cmd name */
static struct ftrace_func_command event_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.func			= event_enable_func,
};
2121
2122static __init int register_event_cmds(void)
2123{
2124 int ret;
2125
2126 ret = register_ftrace_command(&event_enable_cmd);
2127 if (WARN_ON(ret < 0))
2128 return ret;
2129 ret = register_ftrace_command(&event_disable_cmd);
2130 if (WARN_ON(ret < 0))
2131 unregister_ftrace_command(&event_enable_cmd);
2132 return ret;
2133}
#else
/* Without dynamic ftrace there are no function-probe event commands. */
static inline int register_event_cmds(void) { return 0; }
#endif /* CONFIG_DYNAMIC_FTRACE */
2137
/*
 * The top level array has already had its ftrace_event_file
 * descriptors created in order to allow for early events to
 * be recorded. This function is called after the debugfs has been
 * initialized, and we now have to create the files associated
 * to the events.
 */
static __init void
__trace_early_add_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file;
	int ret;


	list_for_each_entry(file, &tr->events, list) {
		ret = event_create_dir(tr->event_dir, file);
		if (ret < 0)
			pr_warning("Could not create directory for event %s\n",
				   file->event_call->name);
	}
}
2159
/*
 * For early boot up, the top trace array requires to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warning("Could not create early event %s\n",
				   call->name);
	}
}
2183
/* Remove the event directory structure for a trace directory. */
static void
__trace_remove_event_dirs(struct trace_array *tr)
{
	struct ftrace_event_file *file, *next;

	/*
	 * _safe iteration: remove_event_file_dir() presumably unlinks and
	 * frees @file — confirm in its definition.
	 */
	list_for_each_entry_safe(file, next, &tr->events, list)
		remove_event_file_dir(file);
}
2193
/* Create @call's per-instance state in every existing trace array. */
static void __add_event_to_tracers(struct ftrace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}
2201
Steven Rostedte4a9ea52011-01-27 09:15:30 -05002202extern struct ftrace_event_call *__start_ftrace_events[];
2203extern struct ftrace_event_call *__stop_ftrace_events[];
Steven Rostedta59fd602009-04-10 13:52:20 -04002204
/* Saved copy of the "trace_event=" boot parameter, used at core_initcall */
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

/*
 * Parse the "trace_event=" kernel command line option: stash the event
 * list for event_trace_enable(), expand the ring buffer early, and
 * disable the startup self tests.
 */
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);
2216
/*
 * Create the top-level event control files for a trace array:
 * "set_event" under @parent, plus the "events" directory with the
 * ring-buffer format files and the global "enable" file.
 * Expects to have event_mutex held when called.
 */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warning("Could not create debugfs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = debugfs_create_dir("events", parent);
	if (!d_events) {
		pr_warning("Could not create debugfs 'events' directory\n");
		return -ENOMEM;
	}

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	/* top-level enable: toggles all events of this trace array */
	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}
2253
/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	/* trace_event_sem protects the per-event directory lists */
	down_write(&trace_event_sem);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
2284
/*
 * The top trace array already had its file descriptors created.
 * Now the files themselves need to be created.
 */
static __init int
early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	/* attach debugfs files to the descriptors made at early boot */
	down_write(&trace_event_sem);
	__trace_early_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
2309
/*
 * Tear down the events of a trace_array instance being removed:
 * disable triggers and events, wait for in-flight users, then remove
 * the per-event state and the events debugfs directory. Returns 0.
 */
int event_trace_del_tracer(struct trace_array *tr)
{
	mutex_lock(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/* Access to events are within rcu_read_lock_sched() */
	synchronize_sched();

	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	debugfs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}
2334
/*
 * early_initcall: set up the slab caches for event fields and files.
 * SLAB_PANIC means boot fails loudly if the caches cannot be created.
 */
static __init int event_trace_memsetup(void)
{
	field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
	file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
	return 0;
}
2341
/*
 * core_initcall: initialize all built-in events, give the top trace
 * array early event descriptors, honor the "trace_event=" boot option,
 * and register the event/trigger filter commands.
 */
static __init int event_trace_enable(void)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_call **iter, *call;
	char *buf = bootup_event_buf;
	char *token;
	int ret;

	/* Walk the linker-generated section of built-in event calls */
	for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {

		call = *iter;
		ret = event_init(call);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	/*
	 * We need the top trace array to have a working set of trace
	 * points at early init, before the debug files and directories
	 * are created. Create the file entries now, and attach them
	 * to the actual file dentries later.
	 */
	__trace_early_add_events(tr);

	/* Enable each comma-separated event from the boot command line */
	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(tr, token, 1);
		if (ret)
			pr_warn("Failed to enable trace event: %s\n", token);
	}

	trace_printk_start_comm();

	register_event_cmds();

	register_trigger_cmds();

	return 0;
}
2387
/*
 * fs_initcall: once debugfs is available, create the tracing event
 * files ("available_events", the events hierarchy) and hook the module
 * notifier so module events follow load/unload.
 */
static __init int event_trace_init(void)
{
	struct trace_array *tr;
	struct dentry *d_tracer;
	struct dentry *entry;
	int ret;

	tr = top_trace_array();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    tr, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	if (trace_define_common_fields())
		pr_warning("tracing: Failed to allocate common fields");

	ret = early_event_add_tracer(d_tracer, tr);
	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");
#endif
	return 0;
}
/*
 * Ordering matters: the slab caches must exist (early) before events
 * are initialized and registered (core), which must happen before the
 * debugfs files are created (fs).
 */
early_initcall(event_trace_memsetup);
core_initcall(event_trace_enable);
fs_initcall(event_trace_init);
Steven Rostedte6187002009-04-15 13:36:40 -04002424
#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * Locks that exist purely to generate locking activity (and thus lock
 * trace events) during the startup self tests below.
 */
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

/*
 * Work item run on every CPU via schedule_on_each_cpu(): exercise a
 * plain spinlock, an irq-disabling spinlock and a mutex with short
 * delays, so a variety of trace points can fire while events are
 * enabled.
 */
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
2443
2444static __init int event_test_thread(void *unused)
2445{
2446 void *test_malloc;
2447
2448 test_malloc = kmalloc(1234, GFP_KERNEL);
2449 if (!test_malloc)
2450 pr_info("failed to kmalloc\n");
2451
2452 schedule_on_each_cpu(test_work);
2453
2454 kfree(test_malloc);
2455
2456 set_current_state(TASK_INTERRUPTIBLE);
2457 while (!kthread_should_stop())
2458 schedule();
2459
2460 return 0;
2461}
2462
2463/*
2464 * Do various things that may trigger events.
2465 */
2466static __init void event_test_stuff(void)
2467{
2468 struct task_struct *test_thread;
2469
2470 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2471 msleep(1);
2472 kthread_stop(test_thread);
2473}
2474
2475/*
2476 * For every trace event defined, we will test each trace point separately,
2477 * and then by groups, and finally all trace points.
2478 */
/*
 * Startup self test: enable each event individually, then each event
 * system, then everything at once, running event_test_stuff() at every
 * stage and verifying that enable/disable succeed.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_subsystem_dir *dir;
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		/* Enable, poke the kernel, disable again. */
		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error disabling system %s\n",
				   system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
2586
#ifdef CONFIG_FUNCTION_TRACER

/* Per-CPU recursion guard for the callback below. */
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

/*
 * Function-tracer callback used while re-running the event self tests:
 * records a TRACE_FN entry for every traced function.  The per-CPU
 * counter prevents recursion — anything this callback itself calls
 * would otherwise re-enter it.
 */
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	/* Bail out if we re-entered on this CPU. */
	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
2628
/*
 * ftrace ops used only during the self test; RECURSION_SAFE because
 * the callback does its own per-CPU recursion protection.
 */
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
2634
2635static __init void event_trace_self_test_with_function(void)
2636{
Steven Rostedt17bb6152011-05-23 15:27:46 -04002637 int ret;
2638 ret = register_ftrace_function(&trace_ops);
2639 if (WARN_ON(ret < 0)) {
2640 pr_info("Failed to enable function tracer for event tests\n");
2641 return;
2642 }
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002643 pr_info("Running tests again, along with the function tracer\n");
2644 event_trace_self_tests();
2645 unregister_ftrace_function(&trace_ops);
2646}
#else
/* Without the function tracer there is nothing extra to test. */
static __init void event_trace_self_test_with_function(void)
{
}
#endif
2652
2653static __init int event_trace_self_tests_init(void)
2654{
Li Zefan020e5f82009-07-01 10:47:05 +08002655 if (!tracing_selftest_disabled) {
2656 event_trace_self_tests();
2657 event_trace_self_test_with_function();
2658 }
Steven Rostedte6187002009-04-15 13:36:40 -04002659
2660 return 0;
2661}
2662
Steven Rostedt28d20e22009-04-20 12:12:44 -04002663late_initcall(event_trace_self_tests_init);
Steven Rostedte6187002009-04-15 13:36:40 -04002664
2665#endif