blob: e7a814b3906b1deee695037103bbb7457fe29990 [file] [log] [blame]
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001/*
2 * event tracer
3 *
4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5 *
Steven Rostedt981d0812009-03-02 13:53:59 -05006 * - Added format output of fields of the trace point.
7 * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
8 *
Steven Rostedtb77e38a2009-02-24 10:21:36 -05009 */
10
Fabian Frederick3448bac2014-06-07 13:43:08 +020011#define pr_fmt(fmt) fmt
12
Steven Rostedte6187002009-04-15 13:36:40 -040013#include <linux/workqueue.h>
14#include <linux/spinlock.h>
15#include <linux/kthread.h>
Steven Rostedtb77e38a2009-02-24 10:21:36 -050016#include <linux/debugfs.h>
17#include <linux/uaccess.h>
18#include <linux/module.h>
19#include <linux/ctype.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090020#include <linux/slab.h>
Steven Rostedte6187002009-04-15 13:36:40 -040021#include <linux/delay.h>
Steven Rostedtb77e38a2009-02-24 10:21:36 -050022
Li Zefan020e5f82009-07-01 10:47:05 +080023#include <asm/setup.h>
24
Steven Rostedt91729ef2009-03-02 15:03:01 -050025#include "trace_output.h"
Steven Rostedtb77e38a2009-02-24 10:21:36 -050026
#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

/* Serializes all modifications of the event lists and event files. */
DEFINE_MUTEX(event_mutex);

/* All registered events, and the fields common to every event record. */
LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

/* Slab caches for the frequently allocated field and file descriptors. */
static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

/* Stored in the top bit of event_subsystem::ref_count: name was allocated. */
#define SYSTEM_FL_FREE_NAME (1 << 31)
41
/* Subsystem reference count with the name-ownership flag masked off. */
static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count & ~SYSTEM_FL_FREE_NAME;
}

/* Increment; returns the masked count *before* the increment. */
static int system_refcount_inc(struct event_subsystem *system)
{
	return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
}

/* Decrement; returns the masked count *after* the decrement. */
static int system_refcount_dec(struct event_subsystem *system)
{
	return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
}
56
/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

/* Closes the outer loop opened by the do_for_each_event_file*() macros. */
#define while_for_each_event_file()		\
	}
69
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080070static struct list_head *
Steven Rostedt2e33af02010-04-22 10:35:55 -040071trace_get_fields(struct ftrace_event_call *event_call)
72{
73 if (!event_call->class->get_fields)
74 return &event_call->class->fields;
75 return event_call->class->get_fields(event_call);
76}
77
zhangwei(Jovi)b3a8c6f2013-03-11 15:13:42 +080078static struct ftrace_event_field *
79__find_event_field(struct list_head *head, char *name)
80{
81 struct ftrace_event_field *field;
82
83 list_for_each_entry(field, head, link) {
84 if (!strcmp(field->name, name))
85 return field;
86 }
87
88 return NULL;
89}
90
91struct ftrace_event_field *
92trace_find_event_field(struct ftrace_event_call *call, char *name)
93{
94 struct ftrace_event_field *field;
95 struct list_head *head;
96
97 field = __find_event_field(&ftrace_common_fields, name);
98 if (field)
99 return field;
100
101 head = trace_get_fields(call);
102 return __find_event_field(head, name);
103}
104
Li Zefan8728fe52010-05-24 16:22:49 +0800105static int __trace_define_field(struct list_head *head, const char *type,
106 const char *name, int offset, int size,
107 int is_signed, int filter_type)
Tom Zanussicf027f62009-03-22 03:30:39 -0500108{
109 struct ftrace_event_field *field;
110
Steven Rostedtd1a29142013-02-27 20:23:57 -0500111 field = kmem_cache_alloc(field_cachep, GFP_TRACE);
Tom Zanussicf027f62009-03-22 03:30:39 -0500112 if (!field)
Namhyung Kimaaf6ac02013-06-07 15:07:48 +0900113 return -ENOMEM;
Ingo Molnarfe9f57f2009-03-22 18:41:59 +0100114
Steven Rostedt92edca02013-02-27 20:41:37 -0500115 field->name = name;
116 field->type = type;
Ingo Molnarfe9f57f2009-03-22 18:41:59 +0100117
Li Zefan43b51ea2009-08-07 10:33:22 +0800118 if (filter_type == FILTER_OTHER)
119 field->filter_type = filter_assign_type(type);
120 else
121 field->filter_type = filter_type;
122
Tom Zanussicf027f62009-03-22 03:30:39 -0500123 field->offset = offset;
124 field->size = size;
Tom Zanussia118e4d2009-04-28 03:04:53 -0500125 field->is_signed = is_signed;
Li Zefanaa38e9f2009-08-07 10:33:02 +0800126
Steven Rostedt2e33af02010-04-22 10:35:55 -0400127 list_add(&field->link, head);
Tom Zanussicf027f62009-03-22 03:30:39 -0500128
129 return 0;
Tom Zanussicf027f62009-03-22 03:30:39 -0500130}
Li Zefan8728fe52010-05-24 16:22:49 +0800131
132int trace_define_field(struct ftrace_event_call *call, const char *type,
133 const char *name, int offset, int size, int is_signed,
134 int filter_type)
135{
136 struct list_head *head;
137
138 if (WARN_ON(!call->class))
139 return 0;
140
141 head = trace_get_fields(call);
142 return __trace_define_field(head, type, name, offset, size,
143 is_signed, filter_type);
144}
Steven Rostedt17c873e2009-04-10 18:12:50 -0400145EXPORT_SYMBOL_GPL(trace_define_field);
Tom Zanussicf027f62009-03-22 03:30:39 -0500146
/*
 * Helper for trace_define_common_fields(): register one member of
 * struct trace_entry as a "common_<item>" filter field.  Relies on
 * 'ret' and 'ent' being in the caller's scope and returns from the
 * caller on error.
 */
#define __common_field(type, item) \
	ret = __trace_define_field(&ftrace_common_fields, #type, \
				   "common_" #item, \
				   offsetof(typeof(ent), item), \
				   sizeof(ent.item), \
				   is_signed_type(type), FILTER_OTHER); \
	if (ret) \
		return ret;
155
/*
 * Register the struct trace_entry members that precede every event
 * record, so filters can reference them as "common_*".
 */
static int trace_define_common_fields(void)
{
	int ret;
	/* 'ent' is only used for offsetof()/sizeof() in __common_field(). */
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}
168
zhangwei(Jovi)ad7067c2013-03-11 15:13:46 +0800169static void trace_destroy_fields(struct ftrace_event_call *call)
Li Zefan2df75e42009-05-06 10:33:04 +0800170{
171 struct ftrace_event_field *field, *next;
Steven Rostedt2e33af02010-04-22 10:35:55 -0400172 struct list_head *head;
Li Zefan2df75e42009-05-06 10:33:04 +0800173
Steven Rostedt2e33af02010-04-22 10:35:55 -0400174 head = trace_get_fields(call);
175 list_for_each_entry_safe(field, next, head, link) {
Li Zefan2df75e42009-05-06 10:33:04 +0800176 list_del(&field->link);
Steven Rostedtd1a29142013-02-27 20:23:57 -0500177 kmem_cache_free(field_cachep, field);
Li Zefan2df75e42009-05-06 10:33:04 +0800178 }
179}
180
Li Zefan87d9b4e2009-12-08 11:14:20 +0800181int trace_event_raw_init(struct ftrace_event_call *call)
182{
183 int id;
184
Steven Rostedt80decc72010-04-23 10:00:22 -0400185 id = register_ftrace_event(&call->event);
Li Zefan87d9b4e2009-12-08 11:14:20 +0800186 if (!id)
187 return -ENODEV;
Li Zefan87d9b4e2009-12-08 11:14:20 +0800188
189 return 0;
190}
191EXPORT_SYMBOL_GPL(trace_event_raw_init);
192
Steven Rostedt3fd40d12012-08-09 22:42:57 -0400193void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
194 struct ftrace_event_file *ftrace_file,
195 unsigned long len)
196{
197 struct ftrace_event_call *event_call = ftrace_file->event_call;
198
199 local_save_flags(fbuffer->flags);
200 fbuffer->pc = preempt_count();
201 fbuffer->ftrace_file = ftrace_file;
202
203 fbuffer->event =
204 trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file,
205 event_call->event.type, len,
206 fbuffer->flags, fbuffer->pc);
207 if (!fbuffer->event)
208 return NULL;
209
210 fbuffer->entry = ring_buffer_event_data(fbuffer->event);
211 return fbuffer->entry;
212}
213EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve);
214
/*
 * Commit the event reserved by ftrace_event_buffer_reserve(): runs any
 * event triggers and releases the ring-buffer reservation.
 */
void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer)
{
	event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer,
				    fbuffer->event, fbuffer->entry,
				    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit);
222
/*
 * Default ->reg() implementation for tracepoint-backed events:
 * (un)registers the class probe on the underlying tracepoint for both
 * the ftrace and perf paths.  For the ftrace cases @data is the
 * ftrace_event_file, which is passed as the probe's private data.
 */
int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		/* No tracepoint work needed for these transitions. */
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);
260
/*
 * Turn cmdline recording on or off for every currently enabled event
 * file in every trace array, tracking the state per file via the
 * RECORDED_CMD bit.
 */
void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}
282
/*
 * Core enable/disable for an event file.
 *
 * @enable: 1 to enable, 0 to disable.
 * @soft_disable: when set, operate in "soft" mode: the tracepoint is
 *	kept registered but the SOFT_DISABLED flag suppresses recording;
 *	soft users are counted in file->sm_ref.
 *
 * Returns 0 on success or the error from the class ->reg() callback.
 * Callers serialize through event_mutex.
 */
static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
					 int enable, int soft_disable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
			clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE);

		if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) {
			clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
			set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);

			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags);
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", ftrace_event_name(call));
				break;
			}
			set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
		}
		break;
	}

	return ret;
}
371
/* Public wrapper; see __ftrace_event_enable_disable() for semantics. */
int trace_event_enable_disable(struct ftrace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}
377
/* Hard enable/disable: no soft-disable bookkeeping (soft_disable = 0). */
static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}
383
Steven Rostedtae63b312012-05-03 23:09:03 -0400384static void ftrace_clear_events(struct trace_array *tr)
Zhaolei0e907c92009-05-25 18:13:59 +0800385{
Steven Rostedtae63b312012-05-03 23:09:03 -0400386 struct ftrace_event_file *file;
Zhaolei0e907c92009-05-25 18:13:59 +0800387
388 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -0400389 list_for_each_entry(file, &tr->events, list) {
390 ftrace_event_enable_disable(file, 0);
Zhaolei0e907c92009-05-25 18:13:59 +0800391 }
392 mutex_unlock(&event_mutex);
393}
394
/*
 * Drop one reference to @system.  On the last put, unlink it and free
 * its filter, its name (only when SYSTEM_FL_FREE_NAME says the name
 * was allocated) and the structure itself.  Callers serialize through
 * event_mutex (see put_system()).
 */
static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	if (system->ref_count & SYSTEM_FL_FREE_NAME)
		kfree(system->name);
	kfree(system);
}
413
/* Take an extra reference on an already-referenced subsystem. */
static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}
419
/* A dir reference pins both the directory and its subsystem. */
static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}
426
/* Drop a dir reference; frees the dir when the count hits zero. */
static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}
437
/* Locked wrapper around __put_system_dir(). */
static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}
444
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +0200445static void remove_subsystem(struct ftrace_subsystem_dir *dir)
446{
447 if (!dir)
448 return;
449
450 if (!--dir->nr_events) {
451 debugfs_remove_recursive(dir->entry);
452 list_del(&dir->list);
453 __put_system_dir(dir);
454 }
455}
456
/*
 * Remove an event's debugfs directory and free the file.  i_private is
 * cleared on every child dentry first, so readers that still hold an
 * open handle see NULL from event_file_data() instead of a dangling
 * pointer.
 */
static void remove_event_file_dir(struct ftrace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
			if (child->d_inode)	/* probably unneeded */
				child->d_inode->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		debugfs_remove_recursive(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	kmem_cache_free(file_cachep, file);
}
477
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	const char *name;
	/* -EINVAL unless at least one event matched the selectors. */
	int ret = -EINVAL;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = ftrace_event_name(call);

		/* Skip events that cannot be registered. */
		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		/* 'match' may name either the event or its subsystem. */
		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}

	return ret;
}
519
/* Locked wrapper around __ftrace_set_clr_event_nolock(). */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}
531
/*
 * Parse a user-supplied event selector and enable/disable the matching
 * events.  @buf is modified in place by strsep().
 */
static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		/* An empty part or "*" acts as a wildcard. */
		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}
562
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.  Operates on the top-level trace array only.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events, -ENODEV if there is no top-level trace array.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
Steven Rostedt4671c792009-05-08 16:27:41 -0400585
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE 127

/*
 * Write handler for the set_event file: parses "[!][system:]event"
 * tokens and enables (or, with a leading '!', disables) the matching
 * events of the seq_file's trace array.
 */
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		/* Skip the leading '!' (if any) when passing the name on. */
		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
630
631static void *
632t_next(struct seq_file *m, void *v, loff_t *pos)
633{
Steven Rostedtae63b312012-05-03 23:09:03 -0400634 struct ftrace_event_file *file = v;
635 struct ftrace_event_call *call;
636 struct trace_array *tr = m->private;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500637
638 (*pos)++;
639
Steven Rostedtae63b312012-05-03 23:09:03 -0400640 list_for_each_entry_continue(file, &tr->events, list) {
641 call = file->event_call;
Steven Rostedt40e26812009-03-10 11:32:40 -0400642 /*
643 * The ftrace subsystem is for showing formats only.
644 * They can not be enabled or disabled via the event files.
645 */
Steven Rostedta1d0ce82010-06-08 11:22:06 -0400646 if (call->class && call->class->reg)
Steven Rostedtae63b312012-05-03 23:09:03 -0400647 return file;
Steven Rostedt40e26812009-03-10 11:32:40 -0400648 }
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500649
Li Zefan30bd39c2009-09-18 14:07:05 +0800650 return NULL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500651}
652
653static void *t_start(struct seq_file *m, loff_t *pos)
654{
Steven Rostedtae63b312012-05-03 23:09:03 -0400655 struct ftrace_event_file *file;
656 struct trace_array *tr = m->private;
Li Zefane1c7e2a2009-06-24 09:52:29 +0800657 loff_t l;
658
Li Zefan20c89282009-05-06 10:33:45 +0800659 mutex_lock(&event_mutex);
Li Zefane1c7e2a2009-06-24 09:52:29 +0800660
Steven Rostedtae63b312012-05-03 23:09:03 -0400661 file = list_entry(&tr->events, struct ftrace_event_file, list);
Li Zefane1c7e2a2009-06-24 09:52:29 +0800662 for (l = 0; l <= *pos; ) {
Steven Rostedtae63b312012-05-03 23:09:03 -0400663 file = t_next(m, file, &l);
664 if (!file)
Li Zefane1c7e2a2009-06-24 09:52:29 +0800665 break;
666 }
Steven Rostedtae63b312012-05-03 23:09:03 -0400667 return file;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500668}
669
670static void *
671s_next(struct seq_file *m, void *v, loff_t *pos)
672{
Steven Rostedtae63b312012-05-03 23:09:03 -0400673 struct ftrace_event_file *file = v;
674 struct trace_array *tr = m->private;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500675
676 (*pos)++;
677
Steven Rostedtae63b312012-05-03 23:09:03 -0400678 list_for_each_entry_continue(file, &tr->events, list) {
679 if (file->flags & FTRACE_EVENT_FL_ENABLED)
680 return file;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500681 }
682
Li Zefan30bd39c2009-09-18 14:07:05 +0800683 return NULL;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500684}
685
686static void *s_start(struct seq_file *m, loff_t *pos)
687{
Steven Rostedtae63b312012-05-03 23:09:03 -0400688 struct ftrace_event_file *file;
689 struct trace_array *tr = m->private;
Li Zefane1c7e2a2009-06-24 09:52:29 +0800690 loff_t l;
691
Li Zefan20c89282009-05-06 10:33:45 +0800692 mutex_lock(&event_mutex);
Li Zefane1c7e2a2009-06-24 09:52:29 +0800693
Steven Rostedtae63b312012-05-03 23:09:03 -0400694 file = list_entry(&tr->events, struct ftrace_event_file, list);
Li Zefane1c7e2a2009-06-24 09:52:29 +0800695 for (l = 0; l <= *pos; ) {
Steven Rostedtae63b312012-05-03 23:09:03 -0400696 file = s_next(m, file, &l);
697 if (!file)
Li Zefane1c7e2a2009-06-24 09:52:29 +0800698 break;
699 }
Steven Rostedtae63b312012-05-03 23:09:03 -0400700 return file;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500701}
702
703static int t_show(struct seq_file *m, void *v)
704{
Steven Rostedtae63b312012-05-03 23:09:03 -0400705 struct ftrace_event_file *file = v;
706 struct ftrace_event_call *call = file->event_call;
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500707
Steven Rostedt8f082012010-04-20 10:47:33 -0400708 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
709 seq_printf(m, "%s:", call->class->system);
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400710 seq_printf(m, "%s\n", ftrace_event_name(call));
Steven Rostedtb77e38a2009-02-24 10:21:36 -0500711
712 return 0;
713}
714
/* seq_file .stop shared by both iterators: drop the start() mutex. */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
719
/*
 * Read handler for an event's "enable" file.  Reports "0" or "1",
 * with a trailing '*' when the event is in soft mode.
 */
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	/*
	 * Snapshot the flags under event_mutex; the file may be removed
	 * (i_private cleared, see remove_event_file_dir()) once the
	 * mutex is dropped.
	 */
	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & FTRACE_EVENT_FL_ENABLED &&
	    !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	/* '*' marks soft mode / soft disabled. */
	if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
	    flags & FTRACE_EVENT_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}
749
/*
 * ->write() for a per-event "enable" file.  Accepts "0" or "1" and
 * toggles the event accordingly under event_mutex; any other value is
 * -EINVAL.  Returns -ENODEV if the event vanished before the lock was
 * taken.  On success the full write count is consumed.
 */
 750static ssize_t
 751event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 752		   loff_t *ppos)
 753{
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200754	struct ftrace_event_file *file;
Steven Rostedt1473e442009-02-24 14:15:08 -0500755	unsigned long val;
 756	int ret;
 757
Peter Huewe22fe9b52011-06-07 21:58:27 +0200758	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 759	if (ret)
Steven Rostedt1473e442009-02-24 14:15:08 -0500760		return ret;
 761
Steven Rostedt1852fcc2009-03-11 14:33:00 -0400762	ret = tracing_update_buffers();
 763	if (ret < 0)
 764		return ret;
 765
Steven Rostedt1473e442009-02-24 14:15:08 -0500766	switch (val) {
 767	case 0:
Steven Rostedt1473e442009-02-24 14:15:08 -0500768	case 1:
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200769		ret = -ENODEV;
Steven Rostedt11a241a2009-03-02 11:49:04 -0500770		mutex_lock(&event_mutex);
Oleg Nesterovbc6f6b02013-07-26 19:25:36 +0200771		file = event_file_data(filp);
 772		if (likely(file))
 773			ret = ftrace_event_enable_disable(file, val);
Steven Rostedt11a241a2009-03-02 11:49:04 -0500774		mutex_unlock(&event_mutex);
Steven Rostedt1473e442009-02-24 14:15:08 -0500775		break;
 776
 777	default:
 778		return -EINVAL;
 779	}
 780
 781	*ppos += cnt;
 782
Li Zefan3b8e4272009-12-08 11:14:52 +0800783	return ret ? ret : cnt;
Steven Rostedt1473e442009-02-24 14:15:08 -0500784}
785
/*
 * ->read() for a subsystem (or top-level) "enable" file.  Scans every
 * event of the trace_array belonging to the subsystem and reports:
 *   '0' - all disabled, '1' - all enabled, 'X' - a mixture,
 *   '?' - no matching events were found.
 * The set bitmask accumulates bit0 (saw disabled) / bit1 (saw enabled)
 * and indexes into set_to_char; the scan stops early on a mixture (3).
 */
Li Zefanc142b152009-05-08 10:32:05 +0800790	const char set_to_char[4] = { '?', '0', '1', 'X' };
Steven Rostedtae63b312012-05-03 23:09:03 -0400791	struct ftrace_subsystem_dir *dir = filp->private_data;
 792	struct event_subsystem *system = dir->subsystem;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400793	struct ftrace_event_call *call;
Steven Rostedtae63b312012-05-03 23:09:03 -0400794	struct ftrace_event_file *file;
 795	struct trace_array *tr = dir->tr;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400796	char buf[2];
Li Zefanc142b152009-05-08 10:32:05 +0800797	int set = 0;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400798	int ret;
 799
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400800	mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -0400801	list_for_each_entry(file, &tr->events, list) {
 802		call = file->event_call;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400803		if (!ftrace_event_name(call) || !call->class || !call->class->reg)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400804			continue;
 805
Steven Rostedt40ee4df2011-07-05 14:32:51 -0400806		if (system && strcmp(call->class->system, system->name) != 0)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400807			continue;
 808
 809		/*
 810		 * We need to find out if all the events are set
 811		 * or if all events or cleared, or if we have
 812		 * a mixture.
 813		 */
Steven Rostedtae63b312012-05-03 23:09:03 -0400814		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));
Li Zefanc142b152009-05-08 10:32:05 +0800815
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400816		/*
 817		 * If we have a mixture, no need to look further.
 818		 */
Li Zefanc142b152009-05-08 10:32:05 +0800819		if (set == 3)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400820			break;
 821	}
 822	mutex_unlock(&event_mutex);
 823
Li Zefanc142b152009-05-08 10:32:05 +0800824	buf[0] = set_to_char[set];
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400825	buf[1] = '\n';
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400826
 827	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 828
 829	return ret;
 830}
831
/*
 * ->write() for a subsystem (or top-level) "enable" file.  Accepts "0"
 * or "1" and enables/disables every event of the subsystem via
 * __ftrace_set_clr_event().  A NULL system (the temporary per-instance
 * dir made by system_tr_open) means "all subsystems" (name == NULL).
 * The subsystem holds a ref while the file is open, so system->name
 * cannot go away here.
 */
 832static ssize_t
 833system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 834		    loff_t *ppos)
 835{
Steven Rostedtae63b312012-05-03 23:09:03 -0400836	struct ftrace_subsystem_dir *dir = filp->private_data;
 837	struct event_subsystem *system = dir->subsystem;
Steven Rostedt40ee4df2011-07-05 14:32:51 -0400838	const char *name = NULL;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400839	unsigned long val;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400840	ssize_t ret;
 841
Peter Huewe22fe9b52011-06-07 21:58:27 +0200842	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 843	if (ret)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400844		return ret;
 845
 846	ret = tracing_update_buffers();
 847	if (ret < 0)
 848		return ret;
 849
Li Zefan8f31bfe2009-05-08 10:31:42 +0800850	if (val != 0 && val != 1)
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400851		return -EINVAL;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400852
Steven Rostedt40ee4df2011-07-05 14:32:51 -0400853	/*
 854	 * Opening of "enable" adds a ref count to system,
 855	 * so the name is safe to use.
 856	 */
 857	if (system)
 858		name = system->name;
 859
Steven Rostedtae63b312012-05-03 23:09:03 -0400860	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400861	if (ret)
Li Zefan8f31bfe2009-05-08 10:31:42 +0800862		goto out;
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400863
 864	ret = cnt;
 865
Li Zefan8f31bfe2009-05-08 10:31:42 +0800866out:
Steven Rostedt8ae79a12009-05-06 22:52:15 -0400867	*ppos += cnt;
 868
 869	return ret;
 870}
871
/*
 * Pseudo-positions used by the "format" file's seq_file iterator
 * (f_start/f_next) before it walks the actual field lists.
 * NOTE(review): "SEPERATOR" is a historical misspelling of
 * "SEPARATOR"; it is internal only and kept for blame/history.
 */
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400872enum {
 873	FORMAT_HEADER		= 1,
Li Zefan86397dc2010-08-17 13:53:06 +0800874	FORMAT_FIELD_SEPERATOR	= 2,
 875	FORMAT_PRINTFMT		= 3,
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400876};
877
/*
 * seq_file ->next() for the "format" file.  Iteration order:
 * FORMAT_HEADER -> common fields (ftrace_common_fields, walked via
 * ->prev, i.e. in definition order) -> FORMAT_FIELD_SEPERATOR ->
 * per-event fields -> FORMAT_PRINTFMT -> NULL (done).  The sentinel
 * enum values double as cursor markers cast to pointers.
 */
 878static void *f_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedt981d0812009-03-02 13:53:59 -0500879{
Oleg Nesterovc5a44a12013-07-26 19:25:43 +0200880	struct ftrace_event_call *call = event_file_data(m->private);
Li Zefan86397dc2010-08-17 13:53:06 +0800881	struct list_head *common_head = &ftrace_common_fields;
 882	struct list_head *head = trace_get_fields(call);
Oleg Nesterov7710b632013-07-18 20:47:10 +0200883	struct list_head *node = v;
Steven Rostedt981d0812009-03-02 13:53:59 -0500884
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400885	(*pos)++;
Lai Jiangshan5a65e952009-12-15 15:39:53 +0800886
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400887	switch ((unsigned long)v) {
 888	case FORMAT_HEADER:
Oleg Nesterov7710b632013-07-18 20:47:10 +0200889		node = common_head;
 890		break;
Li Zefan86397dc2010-08-17 13:53:06 +0800891
 892	case FORMAT_FIELD_SEPERATOR:
Oleg Nesterov7710b632013-07-18 20:47:10 +0200893		node = head;
 894		break;
Lai Jiangshan5a65e952009-12-15 15:39:53 +0800895
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400896	case FORMAT_PRINTFMT:
 897		/* all done */
 898		return NULL;
Lai Jiangshan5a65e952009-12-15 15:39:53 +0800899	}
 900
Oleg Nesterov7710b632013-07-18 20:47:10 +0200901	node = node->prev;
 902	if (node == common_head)
Li Zefan86397dc2010-08-17 13:53:06 +0800903		return (void *)FORMAT_FIELD_SEPERATOR;
Oleg Nesterov7710b632013-07-18 20:47:10 +0200904	else if (node == head)
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400905		return (void *)FORMAT_PRINTFMT;
Oleg Nesterov7710b632013-07-18 20:47:10 +0200906	else
 907		return node;
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400908}
909
/*
 * seq_file ->show() for the "format" file.  Prints the header (name,
 * ID, "format:"), a blank separator line, the print fmt footer, or a
 * single field description depending on the cursor value from
 * f_next().  Array-typed fields have their "[LEN]" suffix moved after
 * the variable name; __data_loc (dynamic array) types are printed
 * verbatim.
 */
 910static int f_show(struct seq_file *m, void *v)
 911{
Oleg Nesterovc5a44a12013-07-26 19:25:43 +0200912	struct ftrace_event_call *call = event_file_data(m->private);
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400913	struct ftrace_event_field *field;
 914	const char *array_descriptor;
 915
 916	switch ((unsigned long)v) {
 917	case FORMAT_HEADER:
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -0400918		seq_printf(m, "name: %s\n", ftrace_event_name(call));
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400919		seq_printf(m, "ID: %d\n", call->event.type);
 920		seq_printf(m, "format:\n");
Li Zefan8728fe52010-05-24 16:22:49 +0800921		return 0;
 922
Li Zefan86397dc2010-08-17 13:53:06 +0800923	case FORMAT_FIELD_SEPERATOR:
 924		seq_putc(m, '\n');
 925		return 0;
 926
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400927	case FORMAT_PRINTFMT:
 928		seq_printf(m, "\nprint fmt: %s\n",
 929			   call->print_fmt);
 930		return 0;
Steven Rostedt981d0812009-03-02 13:53:59 -0500931	}
 932
Oleg Nesterov7710b632013-07-18 20:47:10 +0200933	field = list_entry(v, struct ftrace_event_field, link);
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400934	/*
 935	 * Smartly shows the array type(except dynamic array).
 936	 * Normal:
 937	 *	field:TYPE VAR
 938	 * If TYPE := TYPE[LEN], it is shown:
 939	 *	field:TYPE VAR[LEN]
 940	 */
 941	array_descriptor = strchr(field->type, '[');
 942
 943	if (!strncmp(field->type, "__data_loc", 10))
 944		array_descriptor = NULL;
 945
 946	if (!array_descriptor)
 947		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
 948			   field->type, field->name, field->offset,
 949			   field->size, !!field->is_signed);
 950	else
 951		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
 952			   (int)(array_descriptor - field->type),
 953			   field->type, field->name,
 954			   array_descriptor, field->offset,
 955			   field->size, !!field->is_signed);
 956
 957	return 0;
 958}
959
/*
 * seq_file ->start() for the "format" file.  Takes event_mutex (paired
 * with f_stop) and bails with -ENODEV if the event was removed, then
 * replays f_next() to reach the requested position *pos starting from
 * the FORMAT_HEADER sentinel.
 */
Oleg Nesterov7710b632013-07-18 20:47:10 +0200960static void *f_start(struct seq_file *m, loff_t *pos)
 961{
 962	void *p = (void *)FORMAT_HEADER;
 963	loff_t l = 0;
 964
Oleg Nesterovc5a44a12013-07-26 19:25:43 +0200965	/* ->stop() is called even if ->start() fails */
 966	mutex_lock(&event_mutex);
 967	if (!event_file_data(m->private))
 968		return ERR_PTR(-ENODEV);
 969
Oleg Nesterov7710b632013-07-18 20:47:10 +0200970	while (l < *pos && p)
 971		p = f_next(m, p, &l);
 972
 973	return p;
 974}
975
/* seq_file ->stop() for "format": drop the mutex taken in f_start(). */
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400976static void f_stop(struct seq_file *m, void *p)
 977{
Oleg Nesterovc5a44a12013-07-26 19:25:43 +0200978	mutex_unlock(&event_mutex);
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400979}
980
/* seq_file operations backing each event's "format" file. */
 981static const struct seq_operations trace_format_seq_ops = {
 982	.start		= f_start,
 983	.next		= f_next,
 984	.stop		= f_stop,
 985	.show		= f_show,
 986};
987
/*
 * ->open() for the "format" file: start the seq_file iterator and
 * stash the struct file in m->private so f_start/f_next/f_show can
 * look up the event via event_file_data().
 */
 988static int trace_format_open(struct inode *inode, struct file *file)
 989{
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400990	struct seq_file *m;
 991	int ret;
 992
 993	ret = seq_open(file, &trace_format_seq_ops);
 994	if (ret < 0)
 995		return ret;
 996
 997	m = file->private_data;
Oleg Nesterovc5a44a12013-07-26 19:25:43 +0200998	m->private = file;
Steven Rostedt2a37a3d2010-06-03 15:21:34 -0400999
 1000	return 0;
Steven Rostedt981d0812009-03-02 13:53:59 -05001001}
1002
/*
 * ->read() for the per-event "id" file: emit the event's numeric type
 * id followed by a newline.  The id is stored directly in the file's
 * private data (see ftrace_event_id_fops registration elsewhere); a
 * zero id means the event is gone (-ENODEV).  Only the first read
 * returns data (*ppos check).
 */
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001003static ssize_t
 1004event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 1005{
Oleg Nesterov1a111262013-07-26 19:25:32 +02001006	int id = (long)event_file_data(filp);
Oleg Nesterovcd458ba2013-07-18 20:47:12 +02001007	char buf[32];
 1008	int len;
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001009
 1010	if (*ppos)
 1011		return 0;
 1012
Oleg Nesterov1a111262013-07-26 19:25:32 +02001013	if (unlikely(!id))
 1014		return -ENODEV;
 1015
 1016	len = sprintf(buf, "%d\n", id);
 1017
Oleg Nesterovcd458ba2013-07-18 20:47:12 +02001018	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001019}
1020
/*
 * ->read() for the per-event "filter" file.  Renders the current
 * filter expression into a temporary trace_seq under event_mutex.
 * r stays -ENODEV if the event was removed before the lock was taken;
 * the second "if (file)" check is done outside the lock because the
 * trace_seq contents were already captured under it.
 */
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001021static ssize_t
 1022event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 1023		  loff_t *ppos)
 1024{
Tom Zanussif306cc82013-10-24 08:34:17 -05001025	struct ftrace_event_file *file;
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001026	struct trace_seq *s;
Oleg Nesterove2912b02013-07-26 19:25:40 +02001027	int r = -ENODEV;
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001028
 1029	if (*ppos)
 1030		return 0;
 1031
 1032	s = kmalloc(sizeof(*s), GFP_KERNEL);
Oleg Nesterove2912b02013-07-26 19:25:40 +02001033
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001034	if (!s)
 1035		return -ENOMEM;
 1036
 1037	trace_seq_init(s);
 1038
Oleg Nesterove2912b02013-07-26 19:25:40 +02001039	mutex_lock(&event_mutex);
Tom Zanussif306cc82013-10-24 08:34:17 -05001040	file = event_file_data(filp);
 1041	if (file)
 1042		print_event_filter(file, s);
Oleg Nesterove2912b02013-07-26 19:25:40 +02001043	mutex_unlock(&event_mutex);
 1044
Tom Zanussif306cc82013-10-24 08:34:17 -05001045	if (file)
Oleg Nesterove2912b02013-07-26 19:25:40 +02001046		r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001047
 1048	kfree(s);
 1049
 1050	return r;
 1051}
1052
/*
 * ->write() for the per-event "filter" file.  Copies at most one page
 * (writes >= PAGE_SIZE are rejected so the NUL terminator always fits)
 * of user text into a temporary page, NUL-terminates it, and applies
 * it as the event's filter under event_mutex.  err stays -ENODEV if
 * the event disappeared before the lock was taken.
 */
 1053static ssize_t
 1054event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 1055		   loff_t *ppos)
 1056{
Tom Zanussif306cc82013-10-24 08:34:17 -05001057	struct ftrace_event_file *file;
Tom Zanussi8b372562009-04-28 03:04:59 -05001058	char *buf;
Oleg Nesterove2912b02013-07-26 19:25:40 +02001059	int err = -ENODEV;
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001060
Tom Zanussi8b372562009-04-28 03:04:59 -05001061	if (cnt >= PAGE_SIZE)
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001062		return -EINVAL;
 1063
Tom Zanussi8b372562009-04-28 03:04:59 -05001064	buf = (char *)__get_free_page(GFP_TEMPORARY);
 1065	if (!buf)
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001066		return -ENOMEM;
 1067
Tom Zanussi8b372562009-04-28 03:04:59 -05001068	if (copy_from_user(buf, ubuf, cnt)) {
 1069		free_page((unsigned long) buf);
 1070		return -EFAULT;
 1071	}
 1072	buf[cnt] = '\0';
 1073
Oleg Nesterove2912b02013-07-26 19:25:40 +02001074	mutex_lock(&event_mutex);
Tom Zanussif306cc82013-10-24 08:34:17 -05001075	file = event_file_data(filp);
 1076	if (file)
 1077		err = apply_event_filter(file, buf);
Oleg Nesterove2912b02013-07-26 19:25:40 +02001078	mutex_unlock(&event_mutex);
 1079
Tom Zanussi8b372562009-04-28 03:04:59 -05001080	free_page((unsigned long) buf);
 1081	if (err < 0)
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001082		return err;
Tom Zanussi0a19e532009-04-13 03:17:50 -05001083
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001084	*ppos += cnt;
 1085
 1086	return cnt;
 1087}
1088
/* Global list of all registered event subsystems (struct event_subsystem). */
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001089static LIST_HEAD(event_subsystems);
1090
/*
 * ->open() for a subsystem's "filter"/"enable" files.  Validates that
 * the ftrace_subsystem_dir stored in inode->i_private still exists by
 * searching every trace_array's system list under trace_types_lock +
 * event_mutex, takes a reference on the dir/system and the trace_array,
 * and only then does the generic open.  All references are dropped on
 * any failure path.
 */
 1091static int subsystem_open(struct inode *inode, struct file *filp)
 1092{
 1093	struct event_subsystem *system = NULL;
Steven Rostedtae63b312012-05-03 23:09:03 -04001094	struct ftrace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
 1095	struct trace_array *tr;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001096	int ret;
 1097
Geyslan G. Bemd6d35232013-11-06 16:02:51 -03001098	if (tracing_is_disabled())
 1099		return -ENODEV;
 1100
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001101	/* Make sure the system still exists */
Alexander Z Lama8227412013-07-01 19:37:54 -07001102	mutex_lock(&trace_types_lock);
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001103	mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001104	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 1105		list_for_each_entry(dir, &tr->systems, list) {
 1106			if (dir == inode->i_private) {
 1107				/* Don't open systems with no events */
 1108				if (dir->nr_events) {
 1109					__get_system_dir(dir);
 1110					system = dir->subsystem;
 1111				}
 1112				goto exit_loop;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001113			}
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001114		}
 1115	}
Steven Rostedtae63b312012-05-03 23:09:03 -04001116 exit_loop:
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001117	mutex_unlock(&event_mutex);
Alexander Z Lama8227412013-07-01 19:37:54 -07001118	mutex_unlock(&trace_types_lock);
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001119
Steven Rostedtae63b312012-05-03 23:09:03 -04001120	if (!system)
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001121		return -ENODEV;
 1122
Steven Rostedtae63b312012-05-03 23:09:03 -04001123	/* Some versions of gcc think dir can be uninitialized here */
 1124	WARN_ON(!dir);
 1125
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001126	/* Still need to increment the ref count of the system */
 1127	if (trace_array_get(tr) < 0) {
Steven Rostedtae63b312012-05-03 23:09:03 -04001128		put_system(dir);
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001129		return -ENODEV;
 1130	}
 1131
 1132	ret = tracing_open_generic(inode, filp);
 1133	if (ret < 0) {
 1134		trace_array_put(tr);
 1135		put_system(dir);
 1136	}
Steven Rostedtae63b312012-05-03 23:09:03 -04001137
 1138	return ret;
 1139}
1140
/*
 * ->open() for the top-level (per trace_array) "enable" file.  Builds
 * a temporary ftrace_subsystem_dir with no subsystem that only points
 * to the trace_array; subsystem_release() recognizes the NULL
 * ->subsystem and kfree()s it.  Holds a trace_array reference for the
 * lifetime of the open file.
 */
 1141static int system_tr_open(struct inode *inode, struct file *filp)
 1142{
 1143	struct ftrace_subsystem_dir *dir;
 1144	struct trace_array *tr = inode->i_private;
 1145	int ret;
 1146
Geyslan G. Bemd6d35232013-11-06 16:02:51 -03001147	if (tracing_is_disabled())
 1148		return -ENODEV;
 1149
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001150	if (trace_array_get(tr) < 0)
 1151		return -ENODEV;
 1152
Steven Rostedtae63b312012-05-03 23:09:03 -04001153	/* Make a temporary dir that has no system but points to tr */
 1154	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001155	if (!dir) {
 1156		trace_array_put(tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04001157		return -ENOMEM;
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001158	}
Steven Rostedtae63b312012-05-03 23:09:03 -04001159
 1160	dir->tr = tr;
 1161
 1162	ret = tracing_open_generic(inode, filp);
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001163	if (ret < 0) {
 1164		trace_array_put(tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04001165		kfree(dir);
Geyslan G. Bemd6d35232013-11-06 16:02:51 -03001166		return ret;
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001167	}
Steven Rostedtae63b312012-05-03 23:09:03 -04001168
 1169	filp->private_data = dir;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001170
Geyslan G. Bemd6d35232013-11-06 16:02:51 -03001171	return 0;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001172}
1173
/*
 * ->release() shared by subsystem_open() and system_tr_open(): drop
 * the trace_array reference and either put the real subsystem ref or
 * free the temporary dir (NULL ->subsystem, see system_tr_open()).
 */
 1174static int subsystem_release(struct inode *inode, struct file *file)
 1175{
Steven Rostedtae63b312012-05-03 23:09:03 -04001176	struct ftrace_subsystem_dir *dir = file->private_data;
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001177
Steven Rostedt (Red Hat)8e2e2fa2013-07-02 15:30:53 -04001178	trace_array_put(dir->tr);
 1179
Steven Rostedtae63b312012-05-03 23:09:03 -04001180	/*
 1181	 * If dir->subsystem is NULL, then this is a temporary
 1182	 * descriptor that was made for a trace_array to enable
 1183	 * all subsystems.
 1184	 */
 1185	if (dir->subsystem)
 1186		put_system(dir);
 1187	else
 1188		kfree(dir);
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001189
 1190	return 0;
 1191}
1192
/*
 * ->read() for a subsystem "filter" file: render the subsystem-wide
 * filter expression into a temporary trace_seq and copy it to
 * userspace.  The open holds a subsystem ref, so system is stable.
 */
Tom Zanussicfb180f2009-03-22 03:31:17 -05001193static ssize_t
 1194subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
 1195		      loff_t *ppos)
 1196{
Steven Rostedtae63b312012-05-03 23:09:03 -04001197	struct ftrace_subsystem_dir *dir = filp->private_data;
 1198	struct event_subsystem *system = dir->subsystem;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001199	struct trace_seq *s;
 1200	int r;
 1201
 1202	if (*ppos)
 1203		return 0;
 1204
 1205	s = kmalloc(sizeof(*s), GFP_KERNEL);
 1206	if (!s)
 1207		return -ENOMEM;
 1208
 1209	trace_seq_init(s);
 1210
Tom Zanussi8b372562009-04-28 03:04:59 -05001211	print_subsystem_event_filter(system, s);
Tom Zanussi4bda2d52009-03-24 02:14:31 -05001212	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
Tom Zanussicfb180f2009-03-22 03:31:17 -05001213
 1214	kfree(s);
 1215
 1216	return r;
 1217}
1218
/*
 * ->write() for a subsystem "filter" file: copy at most one page of
 * user text (cnt >= PAGE_SIZE rejected so the NUL always fits),
 * NUL-terminate it, and apply it to every event in the subsystem via
 * apply_subsystem_event_filter().
 */
 1219static ssize_t
 1220subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 1221		       loff_t *ppos)
 1222{
Steven Rostedtae63b312012-05-03 23:09:03 -04001223	struct ftrace_subsystem_dir *dir = filp->private_data;
Tom Zanussi8b372562009-04-28 03:04:59 -05001224	char *buf;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001225	int err;
 1226
Tom Zanussi8b372562009-04-28 03:04:59 -05001227	if (cnt >= PAGE_SIZE)
Tom Zanussicfb180f2009-03-22 03:31:17 -05001228		return -EINVAL;
 1229
Tom Zanussi8b372562009-04-28 03:04:59 -05001230	buf = (char *)__get_free_page(GFP_TEMPORARY);
 1231	if (!buf)
Tom Zanussicfb180f2009-03-22 03:31:17 -05001232		return -ENOMEM;
 1233
Tom Zanussi8b372562009-04-28 03:04:59 -05001234	if (copy_from_user(buf, ubuf, cnt)) {
 1235		free_page((unsigned long) buf);
 1236		return -EFAULT;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001237	}
Tom Zanussi8b372562009-04-28 03:04:59 -05001238	buf[cnt] = '\0';
Tom Zanussicfb180f2009-03-22 03:31:17 -05001239
Steven Rostedtae63b312012-05-03 23:09:03 -04001240	err = apply_subsystem_event_filter(dir, buf);
Tom Zanussi8b372562009-04-28 03:04:59 -05001241	free_page((unsigned long) buf);
 1242	if (err < 0)
Li Zefan44e9c8b2009-04-11 15:55:28 +08001243		return err;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001244
 1245	*ppos += cnt;
 1246
 1247	return cnt;
 1248}
1249
/*
 * Generic ->read() for the header_page/header_event files: the file's
 * private data is a generator callback that fills a trace_seq, whose
 * contents are then copied to userspace.  Only the first read returns
 * data (*ppos check).
 */
Steven Rostedtd1b182a2009-04-15 16:53:47 -04001250static ssize_t
 1251show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 1252{
 1253	int (*func)(struct trace_seq *s) = filp->private_data;
 1254	struct trace_seq *s;
 1255	int r;
 1256
 1257	if (*ppos)
 1258		return 0;
 1259
 1260	s = kmalloc(sizeof(*s), GFP_KERNEL);
 1261	if (!s)
 1262		return -ENOMEM;
 1263
 1264	trace_seq_init(s);
 1265
 1266	func(s);
 1267	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 1268
 1269	kfree(s);
 1270
 1271	return r;
 1272}
1273
/*
 * Forward declarations plus the seq_operations and file_operations
 * tables wiring the debugfs event files ("available_events",
 * "set_event", per-event enable/id/filter/format, per-subsystem
 * filter/enable, and the buffer header files) to the handlers above.
 */
Steven Rostedt15075ca2012-05-03 14:57:28 -04001274static int ftrace_event_avail_open(struct inode *inode, struct file *file);
 1275static int ftrace_event_set_open(struct inode *inode, struct file *file);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001276static int ftrace_event_release(struct inode *inode, struct file *file);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001277
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001278static const struct seq_operations show_event_seq_ops = {
 1279	.start = t_start,
 1280	.next = t_next,
 1281	.show = t_show,
 1282	.stop = t_stop,
 1283};
 1284
 1285static const struct seq_operations show_set_event_seq_ops = {
 1286	.start = s_start,
 1287	.next = s_next,
 1288	.show = t_show,
 1289	.stop = t_stop,
 1290};
 1291
Steven Rostedt2314c4a2009-03-10 12:04:02 -04001292static const struct file_operations ftrace_avail_fops = {
Steven Rostedt15075ca2012-05-03 14:57:28 -04001293	.open = ftrace_event_avail_open,
Steven Rostedt2314c4a2009-03-10 12:04:02 -04001294	.read = seq_read,
 1295	.llseek = seq_lseek,
 1296	.release = seq_release,
 1297};
 1298
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001299static const struct file_operations ftrace_set_event_fops = {
Steven Rostedt15075ca2012-05-03 14:57:28 -04001300	.open = ftrace_event_set_open,
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001301	.read = seq_read,
 1302	.write = ftrace_event_write,
 1303	.llseek = seq_lseek,
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001304	.release = ftrace_event_release,
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001305};
 1306
Steven Rostedt1473e442009-02-24 14:15:08 -05001307static const struct file_operations ftrace_enable_fops = {
Oleg Nesterovbf682c32013-07-28 20:35:27 +02001308	.open = tracing_open_generic,
Steven Rostedt1473e442009-02-24 14:15:08 -05001309	.read = event_enable_read,
 1310	.write = event_enable_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001311	.llseek = default_llseek,
Steven Rostedt1473e442009-02-24 14:15:08 -05001312};
 1313
Steven Rostedt981d0812009-03-02 13:53:59 -05001314static const struct file_operations ftrace_event_format_fops = {
Steven Rostedt2a37a3d2010-06-03 15:21:34 -04001315	.open = trace_format_open,
 1316	.read = seq_read,
 1317	.llseek = seq_lseek,
 1318	.release = seq_release,
Steven Rostedt981d0812009-03-02 13:53:59 -05001319};
 1320
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001321static const struct file_operations ftrace_event_id_fops = {
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001322	.read = event_id_read,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001323	.llseek = default_llseek,
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001324};
 1325
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001326static const struct file_operations ftrace_event_filter_fops = {
 1327	.open = tracing_open_generic,
 1328	.read = event_filter_read,
 1329	.write = event_filter_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001330	.llseek = default_llseek,
Tom Zanussi7ce7e422009-03-22 03:31:04 -05001331};
 1332
Tom Zanussicfb180f2009-03-22 03:31:17 -05001333static const struct file_operations ftrace_subsystem_filter_fops = {
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001334	.open = subsystem_open,
Tom Zanussicfb180f2009-03-22 03:31:17 -05001335	.read = subsystem_filter_read,
 1336	.write = subsystem_filter_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001337	.llseek = default_llseek,
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001338	.release = subsystem_release,
Tom Zanussicfb180f2009-03-22 03:31:17 -05001339};
 1340
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001341static const struct file_operations ftrace_system_enable_fops = {
Steven Rostedt40ee4df2011-07-05 14:32:51 -04001342	.open = subsystem_open,
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001343	.read = system_enable_read,
 1344	.write = system_enable_write,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001345	.llseek = default_llseek,
Steven Rostedt40ee4df2011-07-05 14:32:51 -04001346	.release = subsystem_release,
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001347};
 1348
Steven Rostedtae63b312012-05-03 23:09:03 -04001349static const struct file_operations ftrace_tr_enable_fops = {
 1350	.open = system_tr_open,
 1351	.read = system_enable_read,
 1352	.write = system_enable_write,
 1353	.llseek = default_llseek,
 1354	.release = subsystem_release,
 1355};
 1356
Steven Rostedtd1b182a2009-04-15 16:53:47 -04001357static const struct file_operations ftrace_show_header_fops = {
 1358	.open = tracing_open_generic,
 1359	.read = show_header,
Arnd Bergmann6038f372010-08-15 18:52:59 +02001360	.llseek = default_llseek,
Steven Rostedtd1b182a2009-04-15 16:53:47 -04001361};
1362
/*
 * Common helper for opening the event listing files: start the given
 * seq_file iterator and propagate inode->i_private (the trace_array)
 * into the seq_file's private field for the iterator callbacks.
 */
Steven Rostedtae63b312012-05-03 23:09:03 -04001363static int
 1364ftrace_event_open(struct inode *inode, struct file *file,
 1365		  const struct seq_operations *seq_ops)
Steven Rostedt1473e442009-02-24 14:15:08 -05001366{
Steven Rostedtae63b312012-05-03 23:09:03 -04001367	struct seq_file *m;
 1368	int ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001369
Steven Rostedtae63b312012-05-03 23:09:03 -04001370	ret = seq_open(file, seq_ops);
 1371	if (ret < 0)
 1372		return ret;
 1373	m = file->private_data;
 1374	/* copy tr over to seq ops */
 1375	m->private = inode->i_private;
Steven Rostedt1473e442009-02-24 14:15:08 -05001376
Steven Rostedtae63b312012-05-03 23:09:03 -04001377	return ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001378}
1379
/*
 * ->release() for "set_event": drop the trace_array reference taken in
 * ftrace_event_set_open() before tearing down the seq_file.
 */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001380static int ftrace_event_release(struct inode *inode, struct file *file)
 1381{
 1382	struct trace_array *tr = inode->i_private;
 1383
 1384	trace_array_put(tr);
 1385
 1386	return seq_release(inode, file);
 1387}
1388
/* ->open() for "available_events": read-only event listing iterator. */
Steven Rostedt15075ca2012-05-03 14:57:28 -04001389static int
 1390ftrace_event_avail_open(struct inode *inode, struct file *file)
 1391{
 1392	const struct seq_operations *seq_ops = &show_event_seq_ops;
 1393
Steven Rostedtae63b312012-05-03 23:09:03 -04001394	return ftrace_event_open(inode, file, seq_ops);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001395}
1396
/*
 * ->open() for "set_event": pins the trace_array, clears all events
 * when opened for write with O_TRUNC, then starts the set-event
 * iterator.  The trace_array ref is dropped on failure here or in
 * ftrace_event_release() on close.
 */
 1397static int
 1398ftrace_event_set_open(struct inode *inode, struct file *file)
 1399{
 1400	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
Steven Rostedtae63b312012-05-03 23:09:03 -04001401	struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001402	int ret;
 1403
 1404	if (trace_array_get(tr) < 0)
 1405		return -ENODEV;
Steven Rostedt15075ca2012-05-03 14:57:28 -04001406
 1407	if ((file->f_mode & FMODE_WRITE) &&
 1408	    (file->f_flags & O_TRUNC))
Steven Rostedtae63b312012-05-03 23:09:03 -04001409		ftrace_clear_events(tr);
Steven Rostedt15075ca2012-05-03 14:57:28 -04001410
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07001411	ret = ftrace_event_open(inode, file, seq_ops);
 1412	if (ret < 0)
 1413		trace_array_put(tr);
 1414	return ret;
Steven Rostedt15075ca2012-05-03 14:57:28 -04001415}
1416
/*
 * Allocate and register a new event_subsystem on event_subsystems.
 * The name is duplicated (and SYSTEM_FL_FREE_NAME set in ref_count)
 * only when it does not live in core kernel data, i.e. it came from a
 * module or a kprobe event and may outlive its provider.  Returns the
 * new subsystem (ref_count 1) or NULL on allocation failure.
 */
Steven Rostedtae63b312012-05-03 23:09:03 -04001417static struct event_subsystem *
 1418create_new_subsystem(const char *name)
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001419{
 1420	struct event_subsystem *system;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001421
 1422	/* need to create new entry */
 1423	system = kmalloc(sizeof(*system), GFP_KERNEL);
Steven Rostedtae63b312012-05-03 23:09:03 -04001424	if (!system)
 1425		return NULL;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001426
Steven Rostedte9dbfae2011-07-05 11:36:06 -04001427	system->ref_count = 1;
Steven Rostedt6e94a782013-06-27 10:58:31 -04001428
 1429	/* Only allocate if dynamic (kprobes and modules) */
 1430	if (!core_kernel_data((unsigned long)name)) {
 1431		system->ref_count |= SYSTEM_FL_FREE_NAME;
 1432		system->name = kstrdup(name, GFP_KERNEL);
 1433		if (!system->name)
 1434			goto out_free;
 1435	} else
 1436		system->name = name;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001437
Tom Zanussi30e673b2009-04-28 03:04:47 -05001438	system->filter = NULL;
Tom Zanussicfb180f2009-03-22 03:31:17 -05001439
Tom Zanussi8b372562009-04-28 03:04:59 -05001440	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
Steven Rostedtae63b312012-05-03 23:09:03 -04001441	if (!system->filter)
 1442		goto out_free;
 1443
 1444	list_add(&system->list, &event_subsystems);
 1445
 1446	return system;
 1447
 1448 out_free:
Steven Rostedt6e94a782013-06-27 10:58:31 -04001449	if (system->ref_count & SYSTEM_FL_FREE_NAME)
 1450		kfree(system->name);
Steven Rostedtae63b312012-05-03 23:09:03 -04001451	kfree(system);
 1452	return NULL;
 1453}
1454
/*
 * Find or create the per-trace_array debugfs directory for an event's
 * subsystem and attach the event file to it.  Order of lookup:
 *   1. an existing ftrace_subsystem_dir on this trace_array (bump
 *      nr_events and reuse its dentry);
 *   2. an existing global event_subsystem (take a ref);
 *   3. create a brand-new subsystem via create_new_subsystem().
 * On success the dir gets "filter" and "enable" debugfs files and is
 * linked into tr->systems.  Returns the directory dentry, or NULL on
 * failure (the warning is only printed for allocation failures;
 * debugfs errors were already reported).
 */
 1455static struct dentry *
 1456event_subsystem_dir(struct trace_array *tr, const char *name,
 1457		    struct ftrace_event_file *file, struct dentry *parent)
 1458{
 1459	struct ftrace_subsystem_dir *dir;
 1460	struct event_subsystem *system;
 1461	struct dentry *entry;
 1462
 1463	/* First see if we did not already create this dir */
 1464	list_for_each_entry(dir, &tr->systems, list) {
 1465		system = dir->subsystem;
 1466		if (strcmp(system->name, name) == 0) {
 1467			dir->nr_events++;
 1468			file->system = dir;
 1469			return dir->entry;
 1470		}
Tom Zanussi8b372562009-04-28 03:04:59 -05001471	}
 1472
Steven Rostedtae63b312012-05-03 23:09:03 -04001473	/* Now see if the system itself exists. */
 1474	list_for_each_entry(system, &event_subsystems, list) {
 1475		if (strcmp(system->name, name) == 0)
 1476			break;
 1477	}
 1478	/* Reset system variable when not found */
 1479	if (&system->list == &event_subsystems)
 1480		system = NULL;
 1481
 1482	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
 1483	if (!dir)
 1484		goto out_fail;
 1485
 1486	if (!system) {
 1487		system = create_new_subsystem(name);
 1488		if (!system)
 1489			goto out_free;
 1490	} else
 1491		__get_system(system);
 1492
 1493	dir->entry = debugfs_create_dir(name, parent);
 1494	if (!dir->entry) {
Fabian Frederick3448bac2014-06-07 13:43:08 +02001495		pr_warn("Failed to create system directory %s\n", name);
Steven Rostedtae63b312012-05-03 23:09:03 -04001496		__put_system(system);
 1497		goto out_free;
 1498	}
 1499
 1500	dir->tr = tr;
 1501	dir->ref_count = 1;
 1502	dir->nr_events = 1;
 1503	dir->subsystem = system;
 1504	file->system = dir;
 1505
 1506	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
Tom Zanussie1112b42009-03-31 00:48:49 -05001507				    &ftrace_subsystem_filter_fops);
Tom Zanussi8b372562009-04-28 03:04:59 -05001508	if (!entry) {
 1509		kfree(system->filter);
 1510		system->filter = NULL;
Fabian Frederick3448bac2014-06-07 13:43:08 +02001511		pr_warn("Could not create debugfs '%s/filter' entry\n", name);
Tom Zanussi8b372562009-04-28 03:04:59 -05001512	}
Tom Zanussie1112b42009-03-31 00:48:49 -05001513
Steven Rostedtae63b312012-05-03 23:09:03 -04001514	trace_create_file("enable", 0644, dir->entry, dir,
Frederic Weisbeckerf3f3f002009-09-24 15:27:41 +02001515			  &ftrace_system_enable_fops);
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001516
Steven Rostedtae63b312012-05-03 23:09:03 -04001517	list_add(&dir->list, &tr->systems);
 1518
 1519	return dir->entry;
 1520
 1521 out_free:
 1522	kfree(dir);
 1523 out_fail:
 1524	/* Only print this message if failed on memory allocation */
 1525	if (!dir || !system)
Fabian Frederick3448bac2014-06-07 13:43:08 +02001526		pr_warn("No memory to create event subsystem %s\n", name);
Steven Rostedtae63b312012-05-03 23:09:03 -04001527	return NULL;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001528}
1529
Steven Rostedt1473e442009-02-24 14:15:08 -05001530static int
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001531event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
Steven Rostedt1473e442009-02-24 14:15:08 -05001532{
Steven Rostedtae63b312012-05-03 23:09:03 -04001533 struct ftrace_event_call *call = file->event_call;
1534 struct trace_array *tr = file->tr;
Steven Rostedt2e33af02010-04-22 10:35:55 -04001535 struct list_head *head;
Steven Rostedtae63b312012-05-03 23:09:03 -04001536 struct dentry *d_events;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001537 const char *name;
Steven Rostedtfd994982009-02-28 02:41:25 -05001538 int ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001539
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001540 /*
1541 * If the trace point header did not define TRACE_SYSTEM
1542 * then the system would be called "TRACE_SYSTEM".
1543 */
Steven Rostedtae63b312012-05-03 23:09:03 -04001544 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1545 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1546 if (!d_events)
1547 return -ENOMEM;
1548 } else
1549 d_events = parent;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001550
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001551 name = ftrace_event_name(call);
1552 file->dir = debugfs_create_dir(name, d_events);
Steven Rostedtae63b312012-05-03 23:09:03 -04001553 if (!file->dir) {
Fabian Frederick3448bac2014-06-07 13:43:08 +02001554 pr_warn("Could not create debugfs '%s' directory\n", name);
Steven Rostedt1473e442009-02-24 14:15:08 -05001555 return -1;
1556 }
1557
Steven Rostedt9b637762012-05-10 15:55:43 -04001558 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
Steven Rostedtae63b312012-05-03 23:09:03 -04001559 trace_create_file("enable", 0644, file->dir, file,
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001560 &ftrace_enable_fops);
Steven Rostedt1473e442009-02-24 14:15:08 -05001561
Steven Rostedt22392912010-04-21 12:27:06 -04001562#ifdef CONFIG_PERF_EVENTS
Steven Rostedta1d0ce82010-06-08 11:22:06 -04001563 if (call->event.type && call->class->reg)
Oleg Nesterov1a111262013-07-26 19:25:32 +02001564 trace_create_file("id", 0444, file->dir,
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001565 (void *)(long)call->event.type,
1566 &ftrace_event_id_fops);
Steven Rostedt22392912010-04-21 12:27:06 -04001567#endif
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001568
Li Zefanc9d932c2010-05-24 16:24:28 +08001569 /*
1570 * Other events may have the same class. Only update
1571 * the fields if they are not already defined.
1572 */
1573 head = trace_get_fields(call);
1574 if (list_empty(head)) {
1575 ret = call->class->define_fields(call);
1576 if (ret < 0) {
Fabian Frederick3448bac2014-06-07 13:43:08 +02001577 pr_warn("Could not initialize trace point events/%s\n",
1578 name);
Steven Rostedtae63b312012-05-03 23:09:03 -04001579 return -1;
Tom Zanussicf027f62009-03-22 03:30:39 -05001580 }
1581 }
Tom Zanussif306cc82013-10-24 08:34:17 -05001582 trace_create_file("filter", 0644, file->dir, file,
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001583 &ftrace_event_filter_fops);
Tom Zanussicf027f62009-03-22 03:30:39 -05001584
Tom Zanussi85f2b082013-10-24 08:59:24 -05001585 trace_create_file("trigger", 0644, file->dir, file,
1586 &event_trigger_fops);
1587
Steven Rostedtae63b312012-05-03 23:09:03 -04001588 trace_create_file("format", 0444, file->dir, call,
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001589 &ftrace_event_format_fops);
Steven Rostedtfd994982009-02-28 02:41:25 -05001590
Steven Rostedt1473e442009-02-24 14:15:08 -05001591 return 0;
1592}
1593
Steven Rostedtae63b312012-05-03 23:09:03 -04001594static void remove_event_from_tracers(struct ftrace_event_call *call)
1595{
1596 struct ftrace_event_file *file;
1597 struct trace_array *tr;
1598
1599 do_for_each_event_file_safe(tr, file) {
Steven Rostedtae63b312012-05-03 23:09:03 -04001600 if (file->event_call != call)
1601 continue;
1602
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +02001603 remove_event_file_dir(file);
Steven Rostedtae63b312012-05-03 23:09:03 -04001604 /*
1605 * The do_for_each_event_file_safe() is
1606 * a double loop. After finding the call for this
1607 * trace_array, we use break to jump to the next
1608 * trace_array.
1609 */
1610 break;
1611 } while_for_each_event_file();
1612}
1613
/*
 * Fully tear down an event: disable it in every trace instance, free its
 * filter predicates, unregister its output handler, remove its debugfs
 * entries and unlink it from the global ftrace_events list.
 * Ordering matters: disable before destroying preds and directories.
 */
static void event_remove(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/* Make sure the event is no longer recording */
		ftrace_event_enable_disable(file, 0);
		destroy_preds(file);
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	/* Only unregister if the event registered output functions */
	if (call->event.funcs)
		__unregister_ftrace_event(&call->event);
	remove_event_from_tracers(call);
	list_del(&call->list);
}
1638
1639static int event_init(struct ftrace_event_call *call)
1640{
1641 int ret = 0;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001642 const char *name;
Ezequiel Garcia87819152012-09-12 11:47:57 -03001643
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001644 name = ftrace_event_name(call);
1645 if (WARN_ON(!name))
Ezequiel Garcia87819152012-09-12 11:47:57 -03001646 return -EINVAL;
1647
1648 if (call->class->raw_init) {
1649 ret = call->class->raw_init(call);
1650 if (ret < 0 && ret != -ENOSYS)
Fabian Frederick3448bac2014-06-07 13:43:08 +02001651 pr_warn("Could not initialize trace events/%s\n", name);
Ezequiel Garcia87819152012-09-12 11:47:57 -03001652 }
1653
1654 return ret;
1655}
1656
Li Zefan67ead0a2010-05-24 16:25:13 +08001657static int
Steven Rostedtae63b312012-05-03 23:09:03 -04001658__register_event(struct ftrace_event_call *call, struct module *mod)
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001659{
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001660 int ret;
Steven Rostedt6d723732009-04-10 14:53:50 -04001661
Ezequiel Garcia87819152012-09-12 11:47:57 -03001662 ret = event_init(call);
1663 if (ret < 0)
1664 return ret;
Steven Rostedt701970b2009-04-24 23:11:22 -04001665
Steven Rostedtae63b312012-05-03 23:09:03 -04001666 list_add(&call->list, &ftrace_events);
Li Zefan67ead0a2010-05-24 16:25:13 +08001667 call->mod = mod;
Masami Hiramatsu88f70d72009-09-25 11:20:54 -07001668
Steven Rostedtae63b312012-05-03 23:09:03 -04001669 return 0;
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001670}
1671
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04001672static struct ftrace_event_file *
1673trace_create_new_event(struct ftrace_event_call *call,
1674 struct trace_array *tr)
1675{
1676 struct ftrace_event_file *file;
1677
1678 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
1679 if (!file)
1680 return NULL;
1681
1682 file->event_call = call;
1683 file->tr = tr;
1684 atomic_set(&file->sm_ref, 0);
Tom Zanussi85f2b082013-10-24 08:59:24 -05001685 atomic_set(&file->tm_ref, 0);
1686 INIT_LIST_HEAD(&file->triggers);
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04001687 list_add(&file->list, &tr->events);
1688
1689 return file;
1690}
1691
Steven Rostedtae63b312012-05-03 23:09:03 -04001692/* Add an event to a trace directory */
1693static int
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001694__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
Steven Rostedtae63b312012-05-03 23:09:03 -04001695{
1696 struct ftrace_event_file *file;
1697
Steven Rostedt (Red Hat)da511bf2013-05-09 15:00:07 -04001698 file = trace_create_new_event(call, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04001699 if (!file)
1700 return -ENOMEM;
1701
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001702 return event_create_dir(tr->event_dir, file);
Steven Rostedtae63b312012-05-03 23:09:03 -04001703}
1704
/*
 * Just create a descriptor for early init. A descriptor is required
 * for enabling events at boot. We want to enable events before
 * the filesystem is initialized.
 * The debugfs files themselves are created later by
 * __trace_early_add_event_dirs() once debugfs is up.
 */
static __init int
__trace_early_add_new_event(struct ftrace_event_call *call,
			    struct trace_array *tr)
{
	struct ftrace_event_file *file;

	file = trace_create_new_event(call, tr);
	if (!file)
		return -ENOMEM;

	return 0;
}
1722
Steven Rostedtae63b312012-05-03 23:09:03 -04001723struct ftrace_module_file_ops;
Oleg Nesterov779c5e32013-07-31 19:31:32 +02001724static void __add_event_to_tracers(struct ftrace_event_call *call);
Steven Rostedtae63b312012-05-03 23:09:03 -04001725
/*
 * Add an additional event_call dynamically.
 * Registers @call globally and creates its files in every trace
 * instance.  Lock order: trace_types_lock before event_mutex.
 * Returns 0 on success or a negative errno from __register_event().
 */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;
	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);

	ret = __register_event(call, NULL);
	if (ret >= 0)
		__add_event_to_tracers(call);

	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt701970b2009-04-24 23:11:22 -04001741
/*
 * Must be called under locking of trace_types_lock, event_mutex and
 * trace_event_sem.
 * Tears down the event, then frees its field list and call-level
 * filter predicates (event_remove() already freed per-file preds).
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	event_remove(call);
	trace_destroy_fields(call);
	destroy_call_preds(call);
}
1752
/*
 * Try to remove @call.  Fails with -EBUSY if perf still references the
 * event or if it is enabled in any trace instance.
 * Caller must hold trace_types_lock, event_mutex and trace_event_sem.
 */
static int probe_remove_event_call(struct ftrace_event_call *call)
{
	struct trace_array *tr;
	struct ftrace_event_file *file;

#ifdef CONFIG_PERF_EVENTS
	if (call->perf_refcount)
		return -EBUSY;
#endif
	do_for_each_event_file(tr, file) {
		if (file->event_call != call)
			continue;
		/*
		 * We can't rely on ftrace_event_enable_disable(enable => 0)
		 * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress
		 * TRACE_REG_UNREGISTER.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return -EBUSY;
		/*
		 * The do_for_each_event_file() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();

	__trace_remove_event_call(call);

	return 0;
}
1785
/*
 * Remove an event_call.
 * Takes all three locks in the required order (trace_types_lock,
 * event_mutex, trace_event_sem) around probe_remove_event_call().
 * Returns 0 on success or -EBUSY if the event is still in use.
 */
int trace_remove_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&trace_types_lock);
	mutex_lock(&event_mutex);
	down_write(&trace_event_sem);
	ret = probe_remove_event_call(call);
	up_write(&trace_event_sem);
	mutex_unlock(&event_mutex);
	mutex_unlock(&trace_types_lock);

	return ret;
}
1801
/* Iterate over an array of event-call pointers in [start, end) */
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
1806
1807#ifdef CONFIG_MODULES
1808
/*
 * Register all trace events defined by a module being loaded and add
 * them to every trace instance.  Caller holds trace_types_lock and
 * event_mutex (via trace_module_notify()).
 */
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_event_call **call, **start, **end;

	if (!mod->num_trace_events)
		return;

	/* Don't add infrastructure for mods without tracepoints */
	if (trace_module_has_bad_taint(mod)) {
		pr_err("%s: module has bad taint, not creating trace events\n",
		       mod->name);
		return;
	}

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	for_each_event(call, start, end) {
		__register_event(*call, mod);
		__add_event_to_tracers(*call);
	}
}
1831
/*
 * Remove all trace events owned by a module being unloaded.  If any of
 * them were ever enabled, reset the ring buffers so stale records can't
 * be decoded against a later module's event ids.
 */
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_event_call *call, *p;
	bool clear_trace = false;

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
				clear_trace = true;
			__trace_remove_event_call(call);
		}
	}
	up_write(&trace_event_sem);

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events that were used. The only worry is if
	 * a new module gets loaded, and takes on the same id as the events
	 * of this module. When printing out the buffer, traced events left
	 * over from this module may be passed to the new module events and
	 * unexpected results may occur.
	 */
	if (clear_trace)
		tracing_reset_all_online_cpus();
}
1858
Steven Rostedt61f919a2009-04-14 18:22:32 -04001859static int trace_module_notify(struct notifier_block *self,
1860 unsigned long val, void *data)
Steven Rostedt6d723732009-04-10 14:53:50 -04001861{
1862 struct module *mod = data;
1863
Alexander Z Lama8227412013-07-01 19:37:54 -07001864 mutex_lock(&trace_types_lock);
Steven Rostedt6d723732009-04-10 14:53:50 -04001865 mutex_lock(&event_mutex);
1866 switch (val) {
1867 case MODULE_STATE_COMING:
1868 trace_module_add_events(mod);
1869 break;
1870 case MODULE_STATE_GOING:
1871 trace_module_remove_events(mod);
1872 break;
1873 }
1874 mutex_unlock(&event_mutex);
Alexander Z Lama8227412013-07-01 19:37:54 -07001875 mutex_unlock(&trace_types_lock);
Steven Rostedt6d723732009-04-10 14:53:50 -04001876
1877 return 0;
1878}
Steven Rostedt (Red Hat)315326c2013-03-02 17:37:14 -05001879
/* Notifier block for module load/unload events (default priority) */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
Steven Rostedt61f919a2009-04-14 18:22:32 -04001884#endif /* CONFIG_MODULES */
Steven Rostedt6d723732009-04-10 14:53:50 -04001885
Steven Rostedtae63b312012-05-03 23:09:03 -04001886/* Create a new event directory structure for a trace directory. */
1887static void
1888__trace_add_event_dirs(struct trace_array *tr)
1889{
Steven Rostedtae63b312012-05-03 23:09:03 -04001890 struct ftrace_event_call *call;
1891 int ret;
1892
1893 list_for_each_entry(call, &ftrace_events, list) {
Oleg Nesterov620a30e2013-07-31 19:31:35 +02001894 ret = __trace_add_new_event(call, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04001895 if (ret < 0)
Fabian Frederick3448bac2014-06-07 13:43:08 +02001896 pr_warn("Could not create directory for event %s\n",
1897 ftrace_event_name(call));
Steven Rostedtae63b312012-05-03 23:09:03 -04001898 }
1899}
1900
Tom Zanussi7862ad12013-10-24 08:59:28 -05001901struct ftrace_event_file *
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04001902find_event_file(struct trace_array *tr, const char *system, const char *event)
1903{
1904 struct ftrace_event_file *file;
1905 struct ftrace_event_call *call;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001906 const char *name;
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04001907
1908 list_for_each_entry(file, &tr->events, list) {
1909
1910 call = file->event_call;
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001911 name = ftrace_event_name(call);
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04001912
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001913 if (!name || !call->class || !call->class->reg)
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04001914 continue;
1915
1916 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
1917 continue;
1918
Mathieu Desnoyersde7b2972014-04-08 17:26:21 -04001919 if (strcmp(event, name) == 0 &&
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04001920 strcmp(system, call->class->system) == 0)
1921 return file;
1922 }
1923 return NULL;
1924}
1925
Steven Rostedt (Red Hat)2875a082013-12-20 23:23:05 -05001926#ifdef CONFIG_DYNAMIC_FTRACE
1927
/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"

/* State shared by the enable/disable function-probe callbacks */
struct event_probe_data {
	struct ftrace_event_file	*file;	/* event to flip */
	unsigned long			count;	/* remaining triggers; -1 = unlimited */
	int				ref;	/* probe instances sharing this data */
	bool				enable;	/* true = enable event, false = disable */
};
1938
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04001939static void
1940event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
1941{
1942 struct event_probe_data **pdata = (struct event_probe_data **)_data;
1943 struct event_probe_data *data = *pdata;
1944
1945 if (!data)
1946 return;
1947
1948 if (data->enable)
1949 clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1950 else
1951 set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags);
1952}
1953
/*
 * Counted variant of event_enable_probe(): only acts while data->count
 * is non-zero, decrementing it on each effective switch (-1 means
 * unlimited and is never decremented).
 */
static void
event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	if (!data)
		return;

	/* Count exhausted: probe stays registered but does nothing */
	if (!data->count)
		return;

	/* Skip if the event is in a state we want to switch to */
	if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
		return;

	if (data->count != -1)
		(data->count)--;

	event_enable_probe(ip, parent_ip, _data);
}
1975
/*
 * Seq-file callback: print one registered probe as
 * "<func>:(enable|disable)_event:<system>:<event>[:count=N|:unlimited]".
 */
static int
event_enable_print(struct seq_file *m, unsigned long ip,
		   struct ftrace_probe_ops *ops, void *_data)
{
	struct event_probe_data *data = _data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "%s:%s:%s",
		   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
		   data->file->event_call->class->system,
		   ftrace_event_name(data->file->event_call));

	/* -1 encodes "no trigger limit" */
	if (data->count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", data->count);

	return 0;
}
1996
1997static int
1998event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
1999 void **_data)
2000{
2001 struct event_probe_data **pdata = (struct event_probe_data **)_data;
2002 struct event_probe_data *data = *pdata;
2003
2004 data->ref++;
2005 return 0;
2006}
2007
/*
 * Probe free callback: drop one reference on the shared probe data.
 * When the last reference goes, undo the SOFT_MODE enable taken at
 * registration, release the module pin, and free the data.
 */
static void
event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **_data)
{
	struct event_probe_data **pdata = (struct event_probe_data **)_data;
	struct event_probe_data *data = *pdata;

	/* Refcount underflow would mean a double free */
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(data->file, 0, 1);
		module_put(data->file->event_call->mod);
		kfree(data);
	}
	*pdata = NULL;
}
2027
/*
 * Four probe-ops tables: {enable,disable} x {unlimited,counted}.
 * The enable/disable distinction is carried in event_probe_data
 * (set by event_enable_func()), so the disable tables reuse the same
 * callbacks; only which ops table is registered differs.
 */
static struct ftrace_probe_ops event_enable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_enable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_probe_ops = {
	.func			= event_enable_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};

static struct ftrace_probe_ops event_disable_count_probe_ops = {
	.func			= event_enable_count_probe,
	.print			= event_enable_print,
	.init			= event_enable_init,
	.free			= event_enable_free,
};
2055
/*
 * Handler for the set_ftrace_filter commands
 *   <func>:enable_event:<system>:<event>[:count]
 *   <func>:disable_event:<system>:<event>[:count]
 * (and their '!'-prefixed removal forms).  @param arrives as
 * "<system>:<event>[:count]" and is consumed with strsep().
 * Returns 0 on success or a negative errno.
 */
static int
event_enable_func(struct ftrace_hash *hash,
		  char *glob, char *cmd, char *param, int enabled)
{
	struct trace_array *tr = top_trace_array();
	struct ftrace_event_file *file;
	struct ftrace_probe_ops *ops;
	struct event_probe_data *data;
	const char *system;
	const char *event;
	char *number;
	bool enable;
	int ret;

	if (!tr)
		return -ENODEV;

	/* hash funcs only work with set_ftrace_filter */
	if (!enabled || !param)
		return -EINVAL;

	system = strsep(&param, ":");
	if (!param)
		return -EINVAL;

	event = strsep(&param, ":");

	mutex_lock(&event_mutex);

	ret = -EINVAL;
	file = find_event_file(tr, system, event);
	if (!file)
		goto out;

	enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

	/* A remaining param selects the counted probe variant */
	if (enable)
		ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
	else
		ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

	/* Leading '!' means remove a previously registered probe */
	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		ret = 0;
		goto out;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out;

	data->enable = enable;
	data->count = -1;
	data->file = file;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	ret = -EINVAL;
	if (!strlen(number))
		goto out_free;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, &data->count);
	if (ret)
		goto out_free;

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
	}

	/* Take a SOFT_MODE reference so the probe can flip the event */
	ret = __ftrace_event_enable_disable(file, 1, 1);
	if (ret < 0)
		goto out_put;
	ret = register_ftrace_function_probe(glob, ops, data);
	/*
	 * The above returns on success the # of functions enabled,
	 * but if it didn't find any functions it returns zero.
	 * Consider no functions a failure too.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_disable;
	} else if (ret < 0)
		goto out_disable;
	/* Just return zero, not the number of enabled functions */
	ret = 0;
 out:
	mutex_unlock(&event_mutex);
	return ret;

 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
 out_free:
	kfree(data);
	goto out;
}
2165
/* set_ftrace_filter command: "<func>:enable_event:..." */
static struct ftrace_func_command event_enable_cmd = {
	.name			= ENABLE_EVENT_STR,
	.func			= event_enable_func,
};

/* set_ftrace_filter command: "<func>:disable_event:..." (same handler) */
static struct ftrace_func_command event_disable_cmd = {
	.name			= DISABLE_EVENT_STR,
	.func			= event_enable_func,
};
2175
2176static __init int register_event_cmds(void)
2177{
2178 int ret;
2179
2180 ret = register_ftrace_command(&event_enable_cmd);
2181 if (WARN_ON(ret < 0))
2182 return ret;
2183 ret = register_ftrace_command(&event_disable_cmd);
2184 if (WARN_ON(ret < 0))
2185 unregister_ftrace_command(&event_enable_cmd);
2186 return ret;
2187}
2188#else
2189static inline int register_event_cmds(void) { return 0; }
2190#endif /* CONFIG_DYNAMIC_FTRACE */
2191
Steven Rostedt77248222013-02-27 16:28:06 -05002192/*
2193 * The top level array has already had its ftrace_event_file
2194 * descriptors created in order to allow for early events to
2195 * be recorded. This function is called after the debugfs has been
2196 * initialized, and we now have to create the files associated
2197 * to the events.
2198 */
2199static __init void
2200__trace_early_add_event_dirs(struct trace_array *tr)
2201{
2202 struct ftrace_event_file *file;
2203 int ret;
2204
2205
2206 list_for_each_entry(file, &tr->events, list) {
Oleg Nesterov620a30e2013-07-31 19:31:35 +02002207 ret = event_create_dir(tr->event_dir, file);
Steven Rostedt77248222013-02-27 16:28:06 -05002208 if (ret < 0)
Fabian Frederick3448bac2014-06-07 13:43:08 +02002209 pr_warn("Could not create directory for event %s\n",
2210 ftrace_event_name(file->event_call));
Steven Rostedt77248222013-02-27 16:28:06 -05002211 }
2212}
2213
/*
 * For early boot up, the top trace array requires to have
 * a list of events that can be enabled. This must be done before
 * the filesystem is set up in order to allow events to be traced
 * early.
 * Only creates descriptors (no debugfs files); see
 * __trace_early_add_event_dirs() for the file creation.
 */
static __init void
__trace_early_add_events(struct trace_array *tr)
{
	struct ftrace_event_call *call;
	int ret;

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
			continue;

		ret = __trace_early_add_new_event(call, tr);
		if (ret < 0)
			pr_warn("Could not create early event %s\n",
				ftrace_event_name(call));
	}
}
2237
Steven Rostedt0c8916c2012-08-07 16:14:16 -04002238/* Remove the event directory structure for a trace directory. */
2239static void
2240__trace_remove_event_dirs(struct trace_array *tr)
2241{
2242 struct ftrace_event_file *file, *next;
2243
Oleg Nesterovf6a84bd2013-07-26 19:25:47 +02002244 list_for_each_entry_safe(file, next, &tr->events, list)
2245 remove_event_file_dir(file);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04002246}
2247
/* Add @call to every existing trace instance (caller holds event_mutex) */
static void __add_event_to_tracers(struct ftrace_event_call *call)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list)
		__trace_add_new_event(call, tr);
}
2255
Steven Rostedte4a9ea52011-01-27 09:15:30 -05002256extern struct ftrace_event_call *__start_ftrace_events[];
2257extern struct ftrace_event_call *__stop_ftrace_events[];
Steven Rostedta59fd602009-04-10 13:52:20 -04002258
Li Zefan020e5f82009-07-01 10:47:05 +08002259static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2260
/*
 * Parse the "trace_event=" boot parameter: stash the event list for
 * later enabling, expand the ring buffer, and skip the startup
 * selftests (they would interfere with boot-time tracing).
 */
static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = true;
	tracing_selftest_disabled = true;

	return 1;
}
__setup("trace_event=", setup_trace_event);
2270
/*
 * Expects to have event_mutex held when called.
 * Creates the instance-level "set_event" file, the "events" directory,
 * the ring-buffer format files and the global "enable" file, and
 * records the events directory in tr->event_dir.
 * Returns 0 or -ENOMEM.
 */
static int
create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
	struct dentry *d_events;
	struct dentry *entry;

	entry = debugfs_create_file("set_event", 0644, parent,
				    tr, &ftrace_set_event_fops);
	if (!entry) {
		pr_warn("Could not create debugfs 'set_event' entry\n");
		return -ENOMEM;
	}

	d_events = debugfs_create_dir("events", parent);
	if (!d_events) {
		pr_warn("Could not create debugfs 'events' directory\n");
		return -ENOMEM;
	}

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  tr, &ftrace_tr_enable_fops);

	tr->event_dir = d_events;

	return 0;
}
2307
/**
 * event_trace_add_tracer - add an instance of a trace_array to events
 * @parent: The parent dentry to place the files/directories for events in
 * @tr: The trace array associated with these events
 *
 * When a new instance is created, it needs to set up its events
 * directory, as well as other files associated with events. It also
 * creates the event hierarchy in the @parent/events directory.
 *
 * Returns 0 on success.
 */
int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = create_event_toplevel_files(parent, tr);
	if (ret)
		goto out_unlock;

	/* trace_event_sem protects the event directory lists */
	down_write(&trace_event_sem);
	__trace_add_event_dirs(tr);
	up_write(&trace_event_sem);

 out_unlock:
	mutex_unlock(&event_mutex);

	return ret;
}
2338
2339/*
2340 * The top trace array already had its file descriptors created.
2341 * Now the files themselves need to be created.
2342 */
2343static __init int
2344early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2345{
2346 int ret;
2347
2348 mutex_lock(&event_mutex);
2349
2350 ret = create_event_toplevel_files(parent, tr);
2351 if (ret)
2352 goto out_unlock;
2353
zhangwei(Jovi)52f6ad62013-03-11 15:14:03 +08002354 down_write(&trace_event_sem);
Steven Rostedt77248222013-02-27 16:28:06 -05002355 __trace_early_add_event_dirs(tr);
zhangwei(Jovi)52f6ad62013-03-11 15:14:03 +08002356 up_write(&trace_event_sem);
Steven Rostedt77248222013-02-27 16:28:06 -05002357
2358 out_unlock:
2359 mutex_unlock(&event_mutex);
2360
2361 return ret;
Steven Rostedtae63b312012-05-03 23:09:03 -04002362}
2363
/* Tear down all event files and directories of a trace array instance */
int event_trace_del_tracer(struct trace_array *tr)
{
	mutex_lock(&event_mutex);

	/* Disable any event triggers and associated soft-disabled events */
	clear_event_triggers(tr);

	/* Disable any running events */
	__ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);

	/* Access to events are within rcu_read_lock_sched() */
	synchronize_sched();

	/* Hold trace_event_sem for writing while the dirs are torn down */
	down_write(&trace_event_sem);
	__trace_remove_event_dirs(tr);
	debugfs_remove_recursive(tr->event_dir);
	up_write(&trace_event_sem);

	tr->event_dir = NULL;

	mutex_unlock(&event_mutex);

	return 0;
}
2388
Steven Rostedtd1a29142013-02-27 20:23:57 -05002389static __init int event_trace_memsetup(void)
2390{
2391 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
2392 file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC);
2393 return 0;
2394}
2395
Ezequiel Garcia87819152012-09-12 11:47:57 -03002396static __init int event_trace_enable(void)
2397{
Steven Rostedtae63b312012-05-03 23:09:03 -04002398 struct trace_array *tr = top_trace_array();
Ezequiel Garcia87819152012-09-12 11:47:57 -03002399 struct ftrace_event_call **iter, *call;
2400 char *buf = bootup_event_buf;
2401 char *token;
2402 int ret;
2403
Yoshihiro YUNOMAEdc81e5e2014-06-06 07:35:17 +09002404 if (!tr)
2405 return -ENODEV;
2406
Ezequiel Garcia87819152012-09-12 11:47:57 -03002407 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
2408
2409 call = *iter;
2410 ret = event_init(call);
2411 if (!ret)
2412 list_add(&call->list, &ftrace_events);
2413 }
2414
Steven Rostedt77248222013-02-27 16:28:06 -05002415 /*
2416 * We need the top trace array to have a working set of trace
2417 * points at early init, before the debug files and directories
2418 * are created. Create the file entries now, and attach them
2419 * to the actual file dentries later.
2420 */
2421 __trace_early_add_events(tr);
2422
Ezequiel Garcia87819152012-09-12 11:47:57 -03002423 while (true) {
2424 token = strsep(&buf, ",");
2425
2426 if (!token)
2427 break;
2428 if (!*token)
2429 continue;
2430
Steven Rostedtae63b312012-05-03 23:09:03 -04002431 ret = ftrace_set_clr_event(tr, token, 1);
Ezequiel Garcia87819152012-09-12 11:47:57 -03002432 if (ret)
2433 pr_warn("Failed to enable trace event: %s\n", token);
2434 }
Steven Rostedt81698832012-10-11 10:15:05 -04002435
2436 trace_printk_start_comm();
2437
Steven Rostedt (Red Hat)3cd715d2013-03-12 19:35:13 -04002438 register_event_cmds();
2439
Tom Zanussi85f2b082013-10-24 08:59:24 -05002440 register_trigger_cmds();
2441
Ezequiel Garcia87819152012-09-12 11:47:57 -03002442 return 0;
2443}
2444
Steven Rostedtb77e38a2009-02-24 10:21:36 -05002445static __init int event_trace_init(void)
2446{
Steven Rostedtae63b312012-05-03 23:09:03 -04002447 struct trace_array *tr;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05002448 struct dentry *d_tracer;
2449 struct dentry *entry;
Steven Rostedt6d723732009-04-10 14:53:50 -04002450 int ret;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05002451
Steven Rostedtae63b312012-05-03 23:09:03 -04002452 tr = top_trace_array();
Yoshihiro YUNOMAEdc81e5e2014-06-06 07:35:17 +09002453 if (!tr)
2454 return -ENODEV;
Steven Rostedtae63b312012-05-03 23:09:03 -04002455
Steven Rostedtb77e38a2009-02-24 10:21:36 -05002456 d_tracer = tracing_init_dentry();
2457 if (!d_tracer)
2458 return 0;
2459
Steven Rostedt2314c4a2009-03-10 12:04:02 -04002460 entry = debugfs_create_file("available_events", 0444, d_tracer,
Steven Rostedtae63b312012-05-03 23:09:03 -04002461 tr, &ftrace_avail_fops);
Steven Rostedt2314c4a2009-03-10 12:04:02 -04002462 if (!entry)
Fabian Frederick3448bac2014-06-07 13:43:08 +02002463 pr_warn("Could not create debugfs 'available_events' entry\n");
Steven Rostedt2314c4a2009-03-10 12:04:02 -04002464
Li Zefan8728fe52010-05-24 16:22:49 +08002465 if (trace_define_common_fields())
Fabian Frederick3448bac2014-06-07 13:43:08 +02002466 pr_warn("tracing: Failed to allocate common fields");
Li Zefan8728fe52010-05-24 16:22:49 +08002467
Steven Rostedt77248222013-02-27 16:28:06 -05002468 ret = early_event_add_tracer(d_tracer, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04002469 if (ret)
2470 return ret;
Li Zefan020e5f82009-07-01 10:47:05 +08002471
Oleg Nesterov836d4812013-07-31 19:31:37 +02002472#ifdef CONFIG_MODULES
Steven Rostedt6d723732009-04-10 14:53:50 -04002473 ret = register_module_notifier(&trace_module_nb);
Ming Lei55379372009-05-18 23:04:46 +08002474 if (ret)
Fabian Frederick3448bac2014-06-07 13:43:08 +02002475 pr_warn("Failed to register trace events module notifier\n");
Oleg Nesterov836d4812013-07-31 19:31:37 +02002476#endif
Steven Rostedtb77e38a2009-02-24 10:21:36 -05002477 return 0;
2478}
/* Allocate the event slab caches before any events are registered */
early_initcall(event_trace_memsetup);
/* Register built-in events and parse the boot-time event list */
core_initcall(event_trace_enable);
/* Create the debugfs files once the tracing directory is available */
fs_initcall(event_trace_init);
Steven Rostedte6187002009-04-15 13:36:40 -04002482
#ifdef CONFIG_FTRACE_STARTUP_TEST

/* Locks taken by test_work() to exercise locking paths during self test */
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);
2488
/*
 * Exercise spinlock, irq-disabled spinlock and mutex code paths.
 * Run on every CPU via schedule_on_each_cpu() from event_test_thread().
 */
static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}
2501
2502static __init int event_test_thread(void *unused)
2503{
2504 void *test_malloc;
2505
2506 test_malloc = kmalloc(1234, GFP_KERNEL);
2507 if (!test_malloc)
2508 pr_info("failed to kmalloc\n");
2509
2510 schedule_on_each_cpu(test_work);
2511
2512 kfree(test_malloc);
2513
2514 set_current_state(TASK_INTERRUPTIBLE);
2515 while (!kthread_should_stop())
2516 schedule();
2517
2518 return 0;
2519}
2520
2521/*
2522 * Do various things that may trigger events.
2523 */
2524static __init void event_test_stuff(void)
2525{
2526 struct task_struct *test_thread;
2527
2528 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2529 msleep(1);
2530 kthread_stop(test_thread);
2531}
2532
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_subsystem_dir *dir;
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	struct trace_array *tr;
	int ret;

	tr = top_trace_array();
	if (!tr)
		return;

	pr_info("Running tests on trace events:\n");

	/* Phase 1: enable and exercise each event individually */
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		/* Only test those that have a probe */
		if (!call->class || !call->class->probe)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->class->system &&
		    strcmp(call->class->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", ftrace_event_name(call));

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			pr_warn("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(file, 1);
		event_test_stuff();
		ftrace_event_enable_disable(file, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	/* Phase 2: enable whole subsystems at a time */
	list_for_each_entry(dir, &tr->systems, list) {

		system = dir->subsystem;

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error enabling system %s\n",
				system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret)) {
			pr_warn("error disabling system %s\n",
				system->name);
			continue;
		}

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	/* Phase 3: everything on at once */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
2646
#ifdef CONFIG_FUNCTION_TRACER

/* Per-CPU reentry guard for function_test_events_call() */
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002650
/*
 * Function tracer callback used while the event self tests run.
 * Writes a TRACE_FN entry into the current trace buffer for each
 * traced function, guarded against reentry by a per-cpu counter.
 */
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	pc = preempt_count();
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	/* Bail if this callback is already active on this CPU */
	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	preempt_enable_notrace();
}
2688
/* ftrace_ops registered to rerun the self tests with function tracing on */
static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
2694
2695static __init void event_trace_self_test_with_function(void)
2696{
Steven Rostedt17bb6152011-05-23 15:27:46 -04002697 int ret;
2698 ret = register_ftrace_function(&trace_ops);
2699 if (WARN_ON(ret < 0)) {
2700 pr_info("Failed to enable function tracer for event tests\n");
2701 return;
2702 }
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002703 pr_info("Running tests again, along with the function tracer\n");
2704 event_trace_self_tests();
2705 unregister_ftrace_function(&trace_ops);
2706}
#else
/* Function tracer not configured: nothing extra to run */
static __init void event_trace_self_test_with_function(void)
{
}
#endif
2712
2713static __init int event_trace_self_tests_init(void)
2714{
Li Zefan020e5f82009-07-01 10:47:05 +08002715 if (!tracing_selftest_disabled) {
2716 event_trace_self_tests();
2717 event_trace_self_test_with_function();
2718 }
Steven Rostedte6187002009-04-15 13:36:40 -04002719
2720 return 0;
2721}
2722
/* Self tests run late in boot, after the events have been set up */
late_initcall(event_trace_self_tests_init);
Steven Rostedte6187002009-04-15 13:36:40 -04002724
2725#endif