/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

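/*
 * Allocate a new ftrace_event_field, duplicate the type and name strings,
 * resolve the filter type and add the field to @head.  Returns -ENOMEM if
 * any of the allocations fail.
 */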
static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

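/*
 * Helper for trace_define_common_fields() below: define one member of
 * struct trace_entry on the ftrace_common_fields list, prefixing its name
 * with "common_".  It expects 'ret' and 'ent' to be in scope and returns
 * from the caller on error.
 */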
#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

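/*
 * Default ->reg() callback for event calls: connect or disconnect the
 * tracepoint probe for normal tracing, and the perf probe when
 * CONFIG_PERF_EVENTS is enabled.
 */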
int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

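/*
 * Enable (1) or disable (0) a single event file.  This updates the
 * FTRACE_EVENT_FL_ENABLED and FTRACE_EVENT_FL_RECORDED_CMD flags, starts or
 * stops cmdline recording as needed, and calls the event's ->reg() callback.
 * All callers in this file hold event_mutex.
 */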
static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;

	switch (enable) {
	case 0:
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			file->flags &= ~FTRACE_EVENT_FL_ENABLED;
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		break;
	case 1:
		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			file->flags |= FTRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
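
/*
 * For example, a kernel user could enable a single event with:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *
 * and disable it again by passing 0 as the last argument (this assumes
 * the sched subsystem and its sched_switch event are registered).
 */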

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

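/*
 * seq_file iterators for the event listing files: the t_* callbacks (used
 * by ftrace_avail_fops) walk every event that has a ->reg() callback, while
 * the s_* callbacks (used by ftrace_set_event_fops) only return events that
 * are currently enabled.  t_start()/s_start() take event_mutex and t_stop()
 * releases it.
 */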
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	char *buf;

	if (file->flags & FTRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	unsigned long val;
	int ret;

	if (!file)
		return -EINVAL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

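/*
 * The f_* seq_file callbacks below generate the per event "format" file:
 * first a header with the event name and ID, then the common fields, a
 * separator, the event specific fields, and finally the print fmt string.
 */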
enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

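/*
 * Opening a subsystem file takes a reference on both the subsystem and its
 * per trace_array directory entry, so neither can be freed while the file
 * is open; subsystem_release() drops that reference again.
 */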
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(dir);

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		kfree(dir);

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);

	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree(system->name);
	kfree(system);
	return NULL;
}

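/*
 * Find or create the debugfs directory for an event's subsystem within a
 * trace_array.  An existing ftrace_subsystem_dir is reused (bumping its
 * event count); otherwise the subsystem is looked up or created and its
 * "filter" and "enable" control files are added under the new directory.
 */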
static struct dentry *
event_subsystem_dir(struct trace_array *tr, const char *name,
		    struct ftrace_event_file *file, struct dentry *parent)
{
	struct ftrace_subsystem_dir *dir;
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(dir, &tr->systems, list) {
		system = dir->subsystem;
		if (strcmp(system->name, name) == 0) {
			dir->nr_events++;
			file->system = dir;
			return dir->entry;
		}
	}

	/* Now see if the system itself exists. */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			break;
	}
	/* Reset system variable when not found */
	if (&system->list == &event_subsystems)
		system = NULL;

	dir = kmalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		goto out_fail;

	if (!system) {
		system = create_new_subsystem(name);
		if (!system)
			goto out_free;
	} else
		__get_system(system);

	dir->entry = debugfs_create_dir(name, parent);
	if (!dir->entry) {
		pr_warning("Failed to create system directory %s\n", name);
		__put_system(system);
		goto out_free;
	}

	dir->tr = tr;
	dir->ref_count = 1;
	dir->nr_events = 1;
	dir->subsystem = system;
	file->system = dir;

	entry = debugfs_create_file("filter", 0644, dir->entry, dir,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs '%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, dir->entry, dir,
			  &ftrace_system_enable_fops);

	list_add(&dir->list, &tr->systems);

	return dir->entry;

 out_free:
	kfree(dir);
 out_fail:
	/* Only print this message if failed on memory allocation */
	if (!dir || !system)
		pr_warning("No memory to create event subsystem %s\n",
			   name);
	return NULL;
}

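/*
 * Create the debugfs directory for one event within a trace_array, along
 * with its enable, id (when CONFIG_PERF_EVENTS is set), filter and format
 * files, defining the event's fields first if its class has not yet done so.
 */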
static int
event_create_dir(struct dentry *parent,
		 struct ftrace_event_file *file,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct ftrace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	struct list_head *head;
	struct dentry *d_events;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
		if (!d_events)
			return -ENOMEM;
	} else
		d_events = parent;

	file->dir = debugfs_create_dir(call->name, d_events);
	if (!file->dir) {
		pr_warning("Could not create debugfs '%s' directory\n",
			   call->name);
		return -1;
	}

	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
		trace_create_file("enable", 0644, file->dir, file,
				  enable);

#ifdef CONFIG_PERF_EVENTS
	if (call->event.type && call->class->reg)
		trace_create_file("id", 0444, file->dir, call,
				  id);
#endif

	/*
	 * Other events may have the same class. Only update
	 * the fields if they are not already defined.
	 */
	head = trace_get_fields(call);
	if (list_empty(head)) {
		ret = call->class->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return -1;
		}
	}
	trace_create_file("filter", 0644, file->dir, call,
			  filter);

	trace_create_file("format", 0444, file->dir, call,
			  format);

	return 0;
}

static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		debugfs_remove_recursive(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_from_tracers(struct ftrace_event_call *call)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	do_for_each_event_file_safe(tr, file) {

		if (file->event_call != call)
			continue;

		list_del(&file->list);
		debugfs_remove_recursive(file->dir);
		remove_subsystem(file->system);
		kfree(file);

		/*
		 * The do_for_each_event_file_safe() is
		 * a double loop. After finding the call for this
		 * trace_array, we use break to jump to the next
		 * trace_array.
		 */
		break;
	} while_for_each_event_file();
}

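/*
 * Tear down an event call: disable it in every trace_array, unregister its
 * output event, remove its per tracer files and drop it from the global
 * ftrace_events list.
 */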
Ezequiel Garcia87819152012-09-12 11:47:57 -03001398static void event_remove(struct ftrace_event_call *call)
1399{
Steven Rostedtae63b312012-05-03 23:09:03 -04001400 struct trace_array *tr;
1401 struct ftrace_event_file *file;
1402
1403 do_for_each_event_file(tr, file) {
1404 if (file->event_call != call)
1405 continue;
1406 ftrace_event_enable_disable(file, 0);
1407 /*
1408 * The do_for_each_event_file() is
1409 * a double loop. After finding the call for this
1410 * trace_array, we use break to jump to the next
1411 * trace_array.
1412 */
1413 break;
1414 } while_for_each_event_file();
1415
Ezequiel Garcia87819152012-09-12 11:47:57 -03001416 if (call->event.funcs)
1417 __unregister_ftrace_event(&call->event);
Steven Rostedtae63b312012-05-03 23:09:03 -04001418 remove_event_from_tracers(call);
Ezequiel Garcia87819152012-09-12 11:47:57 -03001419 list_del(&call->list);
1420}
1421
1422static int event_init(struct ftrace_event_call *call)
1423{
1424 int ret = 0;
1425
1426 if (WARN_ON(!call->name))
1427 return -EINVAL;
1428
1429 if (call->class->raw_init) {
1430 ret = call->class->raw_init(call);
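		/*
		 * An error other than -ENOSYS is worth warning about;
		 * -ENOSYS is still propagated, just silently.
		 */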
1431 if (ret < 0 && ret != -ENOSYS)
1432 pr_warn("Could not initialize trace events/%s\n",
1433 call->name);
1434 }
1435
1436 return ret;
1437}
1438
Li Zefan67ead0a2010-05-24 16:25:13 +08001439static int
Steven Rostedtae63b312012-05-03 23:09:03 -04001440__register_event(struct ftrace_event_call *call, struct module *mod)
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001441{
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001442 int ret;
Steven Rostedt6d723732009-04-10 14:53:50 -04001443
Ezequiel Garcia87819152012-09-12 11:47:57 -03001444 ret = event_init(call);
1445 if (ret < 0)
1446 return ret;
Steven Rostedt701970b2009-04-24 23:11:22 -04001447
Steven Rostedtae63b312012-05-03 23:09:03 -04001448 list_add(&call->list, &ftrace_events);
Li Zefan67ead0a2010-05-24 16:25:13 +08001449 call->mod = mod;
Masami Hiramatsu88f70d72009-09-25 11:20:54 -07001450
Steven Rostedtae63b312012-05-03 23:09:03 -04001451 return 0;
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001452}
1453
Steven Rostedtae63b312012-05-03 23:09:03 -04001454/* Add an event to a trace directory */
1455static int
1456__trace_add_new_event(struct ftrace_event_call *call,
1457 struct trace_array *tr,
1458 const struct file_operations *id,
1459 const struct file_operations *enable,
1460 const struct file_operations *filter,
1461 const struct file_operations *format)
1462{
1463 struct ftrace_event_file *file;
1464
1465 file = kzalloc(sizeof(*file), GFP_KERNEL);
1466 if (!file)
1467 return -ENOMEM;
1468
1469 file->event_call = call;
1470 file->tr = tr;
1471 list_add(&file->list, &tr->events);
1472
1473 return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1474}
1475
1476struct ftrace_module_file_ops;
1477static void __add_event_to_tracers(struct ftrace_event_call *call,
1478 struct ftrace_module_file_ops *file_ops);
1479
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001480/* Add an additional event_call dynamically */
1481int trace_add_event_call(struct ftrace_event_call *call)
1482{
1483 int ret;
1484 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001485
1486 ret = __register_event(call, NULL);
1487 if (ret >= 0)
1488 __add_event_to_tracers(call, NULL);
1489
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001490 mutex_unlock(&event_mutex);
1491 return ret;
1492}
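
/*
 * Sketch of a typical dynamic user (for example a kprobe-based
 * event): the caller fills in an ftrace_event_call and registers
 * it, which also creates the event directories in every existing
 * trace_array. The names below are purely illustrative:
 *
 *	my_call.class = &my_event_class;
 *	my_call.name  = "my_probe_event";
 *	if (trace_add_event_call(&my_call))
 *		pr_warn("could not register my_probe_event\n");
 */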
Steven Rostedt701970b2009-04-24 23:11:22 -04001493
Masami Hiramatsu4fead8e2009-09-14 16:49:12 -04001494/*
1495 * Must be called with both event_mutex and trace_event_mutex held.
1496 */
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001497static void __trace_remove_event_call(struct ftrace_event_call *call)
1498{
Ezequiel Garcia87819152012-09-12 11:47:57 -03001499 event_remove(call);
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001500 trace_destroy_fields(call);
1501 destroy_preds(call);
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001502}
1503
1504/* Remove an event_call */
1505void trace_remove_event_call(struct ftrace_event_call *call)
1506{
1507 mutex_lock(&event_mutex);
Masami Hiramatsu4fead8e2009-09-14 16:49:12 -04001508 down_write(&trace_event_mutex);
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001509 __trace_remove_event_call(call);
Masami Hiramatsu4fead8e2009-09-14 16:49:12 -04001510 up_write(&trace_event_mutex);
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001511 mutex_unlock(&event_mutex);
1512}
1513
1514#define for_each_event(event, start, end) \
1515 for (event = start; \
1516 (unsigned long)event < (unsigned long)end; \
1517 event++)
1518
1519#ifdef CONFIG_MODULES
1520
1521static LIST_HEAD(ftrace_module_file_list);
1522
1523/*
1524 * Modules must own their file_operations for module
1525 * reference counting to work.
1526 */
1527struct ftrace_module_file_ops {
1528 struct list_head list;
1529 struct module *mod;
1530 struct file_operations id;
1531 struct file_operations enable;
1532 struct file_operations format;
1533 struct file_operations filter;
1534};
1535
Steven Rostedtae63b312012-05-03 23:09:03 -04001536static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
1537{
1538 struct ftrace_module_file_ops *file_ops;
1539
1540 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1541 if (file_ops->mod == mod)
1542 return file_ops;
1543 }
1544 return NULL;
1545}
1546
Steven Rostedt701970b2009-04-24 23:11:22 -04001547static struct ftrace_module_file_ops *
1548trace_create_file_ops(struct module *mod)
1549{
1550 struct ftrace_module_file_ops *file_ops;
1551
1552 /*
1553 * This is a bit of a PITA. To allow for correct reference
1554 * counting, modules must "own" their file_operations.
1555 * To do this, we allocate the file operations that will be
1556 * used in the event directory.
1557 */
1558
1559 file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1560 if (!file_ops)
1561 return NULL;
1562
1563 file_ops->mod = mod;
1564
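	/*
	 * Setting .owner on each private copy makes an open file pin
	 * the module (fops_get() takes a module reference), so the
	 * module cannot be unloaded while one of its event files is
	 * still open.
	 */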
1565 file_ops->id = ftrace_event_id_fops;
1566 file_ops->id.owner = mod;
1567
1568 file_ops->enable = ftrace_enable_fops;
1569 file_ops->enable.owner = mod;
1570
1571 file_ops->filter = ftrace_event_filter_fops;
1572 file_ops->filter.owner = mod;
1573
1574 file_ops->format = ftrace_event_format_fops;
1575 file_ops->format.owner = mod;
1576
1577 list_add(&file_ops->list, &ftrace_module_file_list);
1578
1579 return file_ops;
1580}
1581
Steven Rostedt6d723732009-04-10 14:53:50 -04001582static void trace_module_add_events(struct module *mod)
1583{
Steven Rostedt701970b2009-04-24 23:11:22 -04001584 struct ftrace_module_file_ops *file_ops = NULL;
Steven Rostedte4a9ea52011-01-27 09:15:30 -05001585 struct ftrace_event_call **call, **start, **end;
Steven Rostedt6d723732009-04-10 14:53:50 -04001586
1587 start = mod->trace_events;
1588 end = mod->trace_events + mod->num_trace_events;
1589
1590 if (start == end)
1591 return;
1592
Li Zefan67ead0a2010-05-24 16:25:13 +08001593 file_ops = trace_create_file_ops(mod);
1594 if (!file_ops)
Steven Rostedt6d723732009-04-10 14:53:50 -04001595 return;
1596
1597 for_each_event(call, start, end) {
Steven Rostedtae63b312012-05-03 23:09:03 -04001598 __register_event(*call, mod);
1599 __add_event_to_tracers(*call, file_ops);
Steven Rostedt6d723732009-04-10 14:53:50 -04001600 }
1601}
1602
1603static void trace_module_remove_events(struct module *mod)
1604{
Steven Rostedt701970b2009-04-24 23:11:22 -04001605 struct ftrace_module_file_ops *file_ops;
Steven Rostedt6d723732009-04-10 14:53:50 -04001606 struct ftrace_event_call *call, *p;
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001607 bool found = false;
Steven Rostedt6d723732009-04-10 14:53:50 -04001608
Steven Rostedt110bf2b2009-06-09 17:29:07 -04001609 down_write(&trace_event_mutex);
Steven Rostedt6d723732009-04-10 14:53:50 -04001610 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1611 if (call->mod == mod) {
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001612 found = true;
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001613 __trace_remove_event_call(call);
Steven Rostedt6d723732009-04-10 14:53:50 -04001614 }
1615 }
Steven Rostedt701970b2009-04-24 23:11:22 -04001616
1617 /* Now free the file_operations */
1618 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1619 if (file_ops->mod == mod)
1620 break;
1621 }
1622 if (&file_ops->list != &ftrace_module_file_list) {
1623 list_del(&file_ops->list);
1624 kfree(file_ops);
1625 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001626
1627 /*
1628 * It is safest to reset the ring buffer if the module being unloaded
1629 * registered any events.
1630 */
1631 if (found)
1632 tracing_reset_current_online_cpus();
Steven Rostedt110bf2b2009-06-09 17:29:07 -04001633 up_write(&trace_event_mutex);
Steven Rostedt6d723732009-04-10 14:53:50 -04001634}
1635
Steven Rostedt61f919a2009-04-14 18:22:32 -04001636static int trace_module_notify(struct notifier_block *self,
1637 unsigned long val, void *data)
Steven Rostedt6d723732009-04-10 14:53:50 -04001638{
1639 struct module *mod = data;
1640
1641 mutex_lock(&event_mutex);
1642 switch (val) {
1643 case MODULE_STATE_COMING:
1644 trace_module_add_events(mod);
1645 break;
1646 case MODULE_STATE_GOING:
1647 trace_module_remove_events(mod);
1648 break;
1649 }
1650 mutex_unlock(&event_mutex);
1651
1652 return 0;
1653}
Steven Rostedt61f919a2009-04-14 18:22:32 -04001654#else
Steven Rostedtae63b312012-05-03 23:09:03 -04001655static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
1656{
1657 return NULL;
1658}
Steven Rostedt61f919a2009-04-14 18:22:32 -04001659static int trace_module_notify(struct notifier_block *self,
1660 unsigned long val, void *data)
1661{
1662 return 0;
1663}
1664#endif /* CONFIG_MODULES */
Steven Rostedt6d723732009-04-10 14:53:50 -04001665
Steven Rostedtae63b312012-05-03 23:09:03 -04001666/* Create a new event directory structure for a trace directory. */
1667static void
1668__trace_add_event_dirs(struct trace_array *tr)
1669{
1670 struct ftrace_module_file_ops *file_ops = NULL;
1671 struct ftrace_event_call *call;
1672 int ret;
1673
1674 list_for_each_entry(call, &ftrace_events, list) {
1675 if (call->mod) {
1676 /*
1677 * Directories for events defined by modules need to
1678 * take a module reference when opened (we don't want
1679 * the module to disappear while one of its files is
1680 * being read). The per-module file_ops keep track of
1681 * that reference through their .owner field.
1682 *
1683 * Event calls are added in groups per module, so once
1684 * we have found the file_ops for one call, the rest of
1685 * the calls from that module use the same one. Only
1686 * search for a new file_ops if the last one did not
1687 * match.
1688 */
1689 if (!file_ops || call->mod != file_ops->mod)
1690 file_ops = find_ftrace_file_ops(call->mod);
1691 if (!file_ops)
1692 continue; /* Warn? */
1693 ret = __trace_add_new_event(call, tr,
1694 &file_ops->id, &file_ops->enable,
1695 &file_ops->filter, &file_ops->format);
1696 if (ret < 0)
1697 pr_warning("Could not create directory for event %s\n",
1698 call->name);
1699 continue;
1700 }
1701 ret = __trace_add_new_event(call, tr,
1702 &ftrace_event_id_fops,
1703 &ftrace_enable_fops,
1704 &ftrace_event_filter_fops,
1705 &ftrace_event_format_fops);
1706 if (ret < 0)
1707 pr_warning("Could not create directory for event %s\n",
1708 call->name);
1709 }
1710}
1711
1712static void
1713__add_event_to_tracers(struct ftrace_event_call *call,
1714 struct ftrace_module_file_ops *file_ops)
1715{
1716 struct trace_array *tr;
1717
1718 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1719 if (file_ops)
1720 __trace_add_new_event(call, tr,
1721 &file_ops->id, &file_ops->enable,
1722 &file_ops->filter, &file_ops->format);
1723 else
1724 __trace_add_new_event(call, tr,
1725 &ftrace_event_id_fops,
1726 &ftrace_enable_fops,
1727 &ftrace_event_filter_fops,
1728 &ftrace_event_format_fops);
1729 }
1730}
1731
Steven Rostedtec827c72009-09-14 10:50:23 -04001732static struct notifier_block trace_module_nb = {
Steven Rostedt6d723732009-04-10 14:53:50 -04001733 .notifier_call = trace_module_notify,
1734 .priority = 0,
1735};
1736
Steven Rostedte4a9ea52011-01-27 09:15:30 -05001737extern struct ftrace_event_call *__start_ftrace_events[];
1738extern struct ftrace_event_call *__stop_ftrace_events[];
Steven Rostedta59fd602009-04-10 13:52:20 -04001739
Li Zefan020e5f82009-07-01 10:47:05 +08001740static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1741
1742static __init int setup_trace_event(char *str)
1743{
1744 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1745 ring_buffer_expanded = 1;
1746 tracing_selftest_disabled = 1;
1747
1748 return 1;
1749}
1750__setup("trace_event=", setup_trace_event);
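
/*
 * Example (sketch): booting with
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * marks the ring buffer to be expanded, disables the startup
 * selftests, and lets event_trace_enable() turn on the listed
 * events as soon as they have been initialized.
 */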
1751
Steven Rostedtae63b312012-05-03 23:09:03 -04001752int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
1753{
1754 struct dentry *d_events;
1755 struct dentry *entry;
1756
Steven Rostedt277ba042012-08-03 16:10:49 -04001757 mutex_lock(&event_mutex);
1758
Steven Rostedtae63b312012-05-03 23:09:03 -04001759 entry = debugfs_create_file("set_event", 0644, parent,
1760 tr, &ftrace_set_event_fops);
1761 if (!entry) {
1762 pr_warning("Could not create debugfs 'set_event' entry\n");
Steven Rostedt277ba042012-08-03 16:10:49 -04001763 mutex_unlock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001764 return -ENOMEM;
1765 }
1766
1767 d_events = debugfs_create_dir("events", parent);
Steven Rostedt277ba042012-08-03 16:10:49 -04001768 if (!d_events) {
Steven Rostedtae63b312012-05-03 23:09:03 -04001769 pr_warning("Could not create debugfs 'events' directory\n");
Steven Rostedt277ba042012-08-03 16:10:49 -04001770 mutex_unlock(&event_mutex);
1771 return -ENOMEM;
1772 }
Steven Rostedtae63b312012-05-03 23:09:03 -04001773
1774 /* ring buffer internal formats */
1775 trace_create_file("header_page", 0444, d_events,
1776 ring_buffer_print_page_header,
1777 &ftrace_show_header_fops);
1778
1779 trace_create_file("header_event", 0444, d_events,
1780 ring_buffer_print_entry_header,
1781 &ftrace_show_header_fops);
1782
1783 trace_create_file("enable", 0644, d_events,
1784 tr, &ftrace_tr_enable_fops);
1785
1786 tr->event_dir = d_events;
Steven Rostedt277ba042012-08-03 16:10:49 -04001787 down_write(&trace_event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001788 __trace_add_event_dirs(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04001789 up_write(&trace_event_mutex);
1790
1791 mutex_unlock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001792
1793 return 0;
1794}
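
/*
 * After event_trace_add_tracer() the instance directory roughly
 * contains (sketch):
 *
 *	set_event		(0644)
 *	events/enable		(0644)
 *	events/header_page	(0444)
 *	events/header_event	(0444)
 *	events/<system>/<event>/...	one subtree per registered event
 */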
1795
Ezequiel Garcia87819152012-09-12 11:47:57 -03001796static __init int event_trace_enable(void)
1797{
Steven Rostedtae63b312012-05-03 23:09:03 -04001798 struct trace_array *tr = top_trace_array();
Ezequiel Garcia87819152012-09-12 11:47:57 -03001799 struct ftrace_event_call **iter, *call;
1800 char *buf = bootup_event_buf;
1801 char *token;
1802 int ret;
1803
1804 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
1805
1806 call = *iter;
1807 ret = event_init(call);
1808 if (!ret)
1809 list_add(&call->list, &ftrace_events);
1810 }
1811
1812 while (true) {
1813 token = strsep(&buf, ",");
1814
1815 if (!token)
1816 break;
1817 if (!*token)
1818 continue;
1819
Steven Rostedtae63b312012-05-03 23:09:03 -04001820 ret = ftrace_set_clr_event(tr, token, 1);
Ezequiel Garcia87819152012-09-12 11:47:57 -03001821 if (ret)
1822 pr_warn("Failed to enable trace event: %s\n", token);
1823 }
Steven Rostedt81698832012-10-11 10:15:05 -04001824
1825 trace_printk_start_comm();
1826
Ezequiel Garcia87819152012-09-12 11:47:57 -03001827 return 0;
1828}
1829
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001830static __init int event_trace_init(void)
1831{
Steven Rostedtae63b312012-05-03 23:09:03 -04001832 struct trace_array *tr;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001833 struct dentry *d_tracer;
1834 struct dentry *entry;
Steven Rostedt6d723732009-04-10 14:53:50 -04001835 int ret;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001836
Steven Rostedtae63b312012-05-03 23:09:03 -04001837 tr = top_trace_array();
1838
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001839 d_tracer = tracing_init_dentry();
1840 if (!d_tracer)
1841 return 0;
1842
Steven Rostedt2314c4a2009-03-10 12:04:02 -04001843 entry = debugfs_create_file("available_events", 0444, d_tracer,
Steven Rostedtae63b312012-05-03 23:09:03 -04001844 tr, &ftrace_avail_fops);
Steven Rostedt2314c4a2009-03-10 12:04:02 -04001845 if (!entry)
1846 pr_warning("Could not create debugfs "
1847 "'available_events' entry\n");
1848
Li Zefan8728fe52010-05-24 16:22:49 +08001849 if (trace_define_common_fields())
1850 pr_warning("tracing: Failed to allocate common fields\n");
1851
Steven Rostedtae63b312012-05-03 23:09:03 -04001852 ret = event_trace_add_tracer(d_tracer, tr);
1853 if (ret)
1854 return ret;
Li Zefan020e5f82009-07-01 10:47:05 +08001855
Steven Rostedt6d723732009-04-10 14:53:50 -04001856 ret = register_module_notifier(&trace_module_nb);
Ming Lei55379372009-05-18 23:04:46 +08001857 if (ret)
Steven Rostedt6d723732009-04-10 14:53:50 -04001858 pr_warning("Failed to register trace events module notifier\n");
1859
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001860 return 0;
1861}
Ezequiel Garcia87819152012-09-12 11:47:57 -03001862core_initcall(event_trace_enable);
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001863fs_initcall(event_trace_init);
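
/*
 * Note the ordering: event_trace_enable() runs at core_initcall time
 * to initialize the compiled-in events and honor the trace_event=
 * boot parameter, while event_trace_init() runs later, at fs_initcall
 * time, to create the actual debugfs files and directories.
 */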
Steven Rostedte6187002009-04-15 13:36:40 -04001864
1865#ifdef CONFIG_FTRACE_STARTUP_TEST
1866
1867static DEFINE_SPINLOCK(test_spinlock);
1868static DEFINE_SPINLOCK(test_spinlock_irq);
1869static DEFINE_MUTEX(test_mutex);
1870
1871static __init void test_work(struct work_struct *dummy)
1872{
1873 spin_lock(&test_spinlock);
1874 spin_lock_irq(&test_spinlock_irq);
1875 udelay(1);
1876 spin_unlock_irq(&test_spinlock_irq);
1877 spin_unlock(&test_spinlock);
1878
1879 mutex_lock(&test_mutex);
1880 msleep(1);
1881 mutex_unlock(&test_mutex);
1882}
1883
1884static __init int event_test_thread(void *unused)
1885{
1886 void *test_malloc;
1887
1888 test_malloc = kmalloc(1234, GFP_KERNEL);
1889 if (!test_malloc)
1890 pr_info("failed to kmalloc\n");
1891
1892 schedule_on_each_cpu(test_work);
1893
1894 kfree(test_malloc);
1895
1896 set_current_state(TASK_INTERRUPTIBLE);
1897 while (!kthread_should_stop())
1898 schedule();
1899
1900 return 0;
1901}
1902
1903/*
1904 * Do various things that may trigger events.
1905 */
1906static __init void event_test_stuff(void)
1907{
1908 struct task_struct *test_thread;
1909
1910 test_thread = kthread_run(event_test_thread, NULL, "test-events");
1911 msleep(1);
1912 kthread_stop(test_thread);
1913}
1914
1915/*
1916 * For every trace event defined, we will test each trace point separately,
1917 * and then by groups, and finally all trace points.
1918 */
Steven Rostedt9ea21c12009-04-16 12:15:44 -04001919static __init void event_trace_self_tests(void)
Steven Rostedte6187002009-04-15 13:36:40 -04001920{
Steven Rostedtae63b312012-05-03 23:09:03 -04001921 struct ftrace_subsystem_dir *dir;
1922 struct ftrace_event_file *file;
Steven Rostedte6187002009-04-15 13:36:40 -04001923 struct ftrace_event_call *call;
1924 struct event_subsystem *system;
Steven Rostedtae63b312012-05-03 23:09:03 -04001925 struct trace_array *tr;
Steven Rostedte6187002009-04-15 13:36:40 -04001926 int ret;
1927
Steven Rostedtae63b312012-05-03 23:09:03 -04001928 tr = top_trace_array();
1929
Steven Rostedte6187002009-04-15 13:36:40 -04001930 pr_info("Running tests on trace events:\n");
1931
Steven Rostedtae63b312012-05-03 23:09:03 -04001932 list_for_each_entry(file, &tr->events, list) {
1933
1934 call = file->event_call;
Steven Rostedte6187002009-04-15 13:36:40 -04001935
Steven Rostedt22392912010-04-21 12:27:06 -04001936 /* Only test those that have a probe */
1937 if (!call->class || !call->class->probe)
Steven Rostedte6187002009-04-15 13:36:40 -04001938 continue;
1939
Steven Rostedt1f5a6b42009-09-14 11:58:24 -04001940/*
1941 * Testing syscall events here is of little use, but we
1942 * still do it if configured. It is time consuming, though.
1943 * What we really need is a user thread to perform the
1944 * syscalls as we test.
1945 */
1946#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
Steven Rostedt8f082012010-04-20 10:47:33 -04001947 if (call->class->system &&
1948 strcmp(call->class->system, "syscalls") == 0)
Steven Rostedt1f5a6b42009-09-14 11:58:24 -04001949 continue;
1950#endif
1951
Steven Rostedte6187002009-04-15 13:36:40 -04001952 pr_info("Testing event %s: ", call->name);
1953
1954 /*
1955 * If an event is already enabled, someone is using
1956 * it and the self test should not be on.
1957 */
Steven Rostedtae63b312012-05-03 23:09:03 -04001958 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
Steven Rostedte6187002009-04-15 13:36:40 -04001959 pr_warning("Enabled event during self test!\n");
1960 WARN_ON_ONCE(1);
1961 continue;
1962 }
1963
Steven Rostedtae63b312012-05-03 23:09:03 -04001964 ftrace_event_enable_disable(file, 1);
Steven Rostedte6187002009-04-15 13:36:40 -04001965 event_test_stuff();
Steven Rostedtae63b312012-05-03 23:09:03 -04001966 ftrace_event_enable_disable(file, 0);
Steven Rostedte6187002009-04-15 13:36:40 -04001967
1968 pr_cont("OK\n");
1969 }
1970
1971 /* Now test at the sub system level */
1972
1973 pr_info("Running tests on trace event systems:\n");
1974
Steven Rostedtae63b312012-05-03 23:09:03 -04001975 list_for_each_entry(dir, &tr->systems, list) {
1976
1977 system = dir->subsystem;
Steven Rostedte6187002009-04-15 13:36:40 -04001978
1979 /* the ftrace system is special, skip it */
1980 if (strcmp(system->name, "ftrace") == 0)
1981 continue;
1982
1983 pr_info("Testing event system %s: ", system->name);
1984
Steven Rostedtae63b312012-05-03 23:09:03 -04001985 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
Steven Rostedte6187002009-04-15 13:36:40 -04001986 if (WARN_ON_ONCE(ret)) {
1987 pr_warning("error enabling system %s\n",
1988 system->name);
1989 continue;
1990 }
1991
1992 event_test_stuff();
1993
Steven Rostedtae63b312012-05-03 23:09:03 -04001994 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
Yuanhan Liu76bab1b2012-08-27 15:13:45 +08001995 if (WARN_ON_ONCE(ret)) {
Steven Rostedte6187002009-04-15 13:36:40 -04001996 pr_warning("error disabling system %s\n",
1997 system->name);
Yuanhan Liu76bab1b2012-08-27 15:13:45 +08001998 continue;
1999 }
Steven Rostedte6187002009-04-15 13:36:40 -04002000
2001 pr_cont("OK\n");
2002 }
2003
2004 /* Test with all events enabled */
2005
2006 pr_info("Running tests on all trace events:\n");
2007 pr_info("Testing all events: ");
2008
Steven Rostedtae63b312012-05-03 23:09:03 -04002009 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
Steven Rostedte6187002009-04-15 13:36:40 -04002010 if (WARN_ON_ONCE(ret)) {
Steven Rostedte6187002009-04-15 13:36:40 -04002011 pr_warning("error enabling all events\n");
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002012 return;
Steven Rostedte6187002009-04-15 13:36:40 -04002013 }
2014
2015 event_test_stuff();
2016
2017 /* reset sysname */
Steven Rostedtae63b312012-05-03 23:09:03 -04002018 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
Steven Rostedte6187002009-04-15 13:36:40 -04002019 if (WARN_ON_ONCE(ret)) {
2020 pr_warning("error disabling all events\n");
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002021 return;
Steven Rostedte6187002009-04-15 13:36:40 -04002022 }
2023
2024 pr_cont("OK\n");
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002025}
2026
2027#ifdef CONFIG_FUNCTION_TRACER
2028
Tejun Heo245b2e72009-06-24 15:13:48 +09002029static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002030
2031static void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04002032function_test_events_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04002033 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002034{
2035 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002036 struct ring_buffer *buffer;
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002037 struct ftrace_entry *entry;
2038 unsigned long flags;
2039 long disabled;
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002040 int cpu;
2041 int pc;
2042
2043 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002044 preempt_disable_notrace();
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002045 cpu = raw_smp_processor_id();
Tejun Heo245b2e72009-06-24 15:13:48 +09002046 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002047
2048 if (disabled != 1)
2049 goto out;
2050
2051 local_save_flags(flags);
2052
Steven Rostedte77405a2009-09-02 14:17:06 -04002053 event = trace_current_buffer_lock_reserve(&buffer,
2054 TRACE_FN, sizeof(*entry),
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002055 flags, pc);
2056 if (!event)
2057 goto out;
2058 entry = ring_buffer_event_data(event);
2059 entry->ip = ip;
2060 entry->parent_ip = parent_ip;
2061
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002062 trace_buffer_unlock_commit(buffer, event, flags, pc);
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002063
2064 out:
Tejun Heo245b2e72009-06-24 15:13:48 +09002065 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
Steven Rostedt5168ae52010-06-03 09:36:50 -04002066 preempt_enable_notrace();
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002067}
2068
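/*
 * FTRACE_OPS_FL_RECURSION_SAFE below tells ftrace that the callback
 * handles its own recursion protection (the per-cpu
 * ftrace_test_event_disable counter above), so the core does not
 * need to wrap it in another layer.
 */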
2069static struct ftrace_ops trace_ops __initdata =
2070{
2071 .func = function_test_events_call,
Steven Rostedt47409742012-07-20 11:04:44 -04002072 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002073};
2074
2075static __init void event_trace_self_test_with_function(void)
2076{
Steven Rostedt17bb6152011-05-23 15:27:46 -04002077 int ret;
2078 ret = register_ftrace_function(&trace_ops);
2079 if (WARN_ON(ret < 0)) {
2080 pr_info("Failed to enable function tracer for event tests\n");
2081 return;
2082 }
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002083 pr_info("Running tests again, along with the function tracer\n");
2084 event_trace_self_tests();
2085 unregister_ftrace_function(&trace_ops);
2086}
2087#else
2088static __init void event_trace_self_test_with_function(void)
2089{
2090}
2091#endif
2092
2093static __init int event_trace_self_tests_init(void)
2094{
Li Zefan020e5f82009-07-01 10:47:05 +08002095 if (!tracing_selftest_disabled) {
2096 event_trace_self_tests();
2097 event_trace_self_test_with_function();
2098 }
Steven Rostedte6187002009-04-15 13:36:40 -04002099
2100 return 0;
2101}
2102
Steven Rostedt28d20e22009-04-20 12:12:44 -04002103late_initcall(event_trace_self_tests_init);
Steven Rostedte6187002009-04-15 13:36:40 -04002104
2105#endif