/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct ftrace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

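/*
 * Example (sketch) of walking every event file of every trace instance
 * with the helpers above:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file->flags & FTRACE_EVENT_FL_ENABLED)
 *			continue;
 *		...
 *	} while_for_each_event_file();
 */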
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

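/*
 * Every event record begins with the fields defined below; they appear
 * as the "common_*" entries at the top of each event's format file.
 */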
static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

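/*
 * Default ->reg() callback for events created by the TRACE_EVENT() macros:
 * attach or detach the trace probe (and the perf probe when perf events
 * are built in) for the tracepoint behind this event, based on @type.
 */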
int ftrace_event_reg(struct ftrace_event_call *call,
		     enum trace_reg type, void *data)
{
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->name,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->name,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct ftrace_event_file *file;
	struct trace_array *tr;

	mutex_lock(&event_mutex);
	do_for_each_event_file(tr, file) {

		if (!(file->flags & FTRACE_EVENT_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
		}
	} while_for_each_event_file();
	mutex_unlock(&event_mutex);
}

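/*
 * Enable (1) or disable (0) a single event file. Callers are expected to
 * hold event_mutex. Enabling registers the tracepoint probe through the
 * class ->reg() callback and may also turn on cmdline recording.
 */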
static int ftrace_event_enable_disable(struct ftrace_event_file *file,
				       int enable)
{
	struct ftrace_event_call *call = file->event_call;
	int ret = 0;

	switch (enable) {
	case 0:
		if (file->flags & FTRACE_EVENT_FL_ENABLED) {
			file->flags &= ~FTRACE_EVENT_FL_ENABLED;
			if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				file->flags &= ~FTRACE_EVENT_FL_RECORDED_CMD;
			}
			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		break;
	case 1:
		if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
			if (trace_flags & TRACE_ITER_RECORD_CMD) {
				tracing_start_cmdline_record();
				file->flags |= FTRACE_EVENT_FL_RECORDED_CMD;
			}
			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			file->flags |= FTRACE_EVENT_FL_ENABLED;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system->ref_count == 0);
	if (--system->ref_count)
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system->ref_count == 0);
	system->ref_count++;
}

static void __get_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct ftrace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct ftrace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	struct ftrace_event_file *file;
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;

		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(file, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(tr, match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

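/*
 * Example (sketch): another part of the kernel could enable every event
 * in the "sched" subsystem with
 *
 *	trace_set_clr_event("sched", NULL, 1);
 *
 * and later turn a single event back off with
 *
 *	trace_set_clr_event("sched", "sched_switch", 0);
 */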
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

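/*
 * Writer for the "set_event" file. Input is parsed one token at a time;
 * a leading '!' clears the matching events instead of setting them.
 * For example (sketch), from user space:
 *
 *	echo 'sched:sched_switch' > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 */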
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg)
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & FTRACE_EVENT_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct ftrace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_file *file = v;
	struct ftrace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

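/*
 * Per-event "enable" file: reads back "0\n" or "1\n" and accepts the same
 * values to switch the event off or on. For example (sketch):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 */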
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	char *buf;

	if (file->flags & FTRACE_EVENT_FL_ENABLED)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_file *file = filp->private_data;
	unsigned long val;
	int ret;

	if (!file)
		return -EINVAL;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if (!call->name || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

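/*
 * Sequence iterator for the per-event "format" file: the header, the
 * common fields, a separator, the event's own fields, then the print
 * format. A field line looks roughly like
 *
 *	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
 */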
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		if (unlikely(list_empty(common_head)))
			return NULL;

		field = list_entry(common_head->prev,
				   struct ftrace_event_field, link);
		return field;

	case FORMAT_FIELD_SEPERATOR:
		if (unlikely(list_empty(head)))
			return NULL;

		field = list_entry(head->prev, struct ftrace_event_field, link);
		return field;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	field = v;
	if (field->link.prev == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (field->link.prev == head)
		return (void *)FORMAT_PRINTFMT;

	field = list_entry(field->link.prev, struct ftrace_event_field, link);

	return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	loff_t l = 0;
	void *p;

	/* Start by showing the header */
	if (!*pos)
		return (void *)FORMAT_HEADER;

	p = (void *)FORMAT_HEADER;
	do {
		p = f_next(m, p, &l);
	} while (p && l < *pos);

	return p;
}

static int f_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", call->name);
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_printf(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = v;

	/*
	 * Smartly shows the array type(except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (!strncmp(field->type, "__data_loc", 10))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct ftrace_event_call *call = inode->i_private;
	struct seq_file *m;
	int ret;

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = call;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->event.type);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static LIST_HEAD(event_subsystems);

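/*
 * Opening a subsystem "filter" or "enable" file takes a reference on the
 * ftrace_subsystem_dir (and thus on the subsystem itself), so neither can
 * go away while the file is open; the reference is dropped again in
 * subsystem_release().
 */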
static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct ftrace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		put_system(dir);

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct ftrace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	dir->tr = tr;

	ret = tracing_open_generic(inode, filp);
	if (ret < 0)
		kfree(dir);

	filp->private_data = dir;

	return ret;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct ftrace_subsystem_dir *dir = file->private_data;

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct ftrace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(dir, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = trace_format_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
	.llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = subsystem_open,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = subsystem_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_tr_enable_fops = {
	.open = system_tr_open,
	.read = system_enable_read,
	.write = system_enable_write,
	.llseek = default_llseek,
	.release = subsystem_release,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
	.llseek = default_llseek,
};

static int
ftrace_event_open(struct inode *inode, struct file *file,
		  const struct seq_operations *seq_ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, seq_ops);
	if (ret < 0)
		return ret;
	m = file->private_data;
	/* copy tr over to seq ops */
	m->private = inode->i_private;

	return ret;
}

static int
ftrace_event_avail_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_event_seq_ops;

	return ftrace_event_open(inode, file, seq_ops);
}

static int
ftrace_event_set_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
	struct trace_array *tr = inode->i_private;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events(tr);

	return ftrace_event_open(inode, file, seq_ops);
}

static struct event_subsystem *
create_new_subsystem(const char *name)
{
	struct event_subsystem *system;

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system)
		return NULL;

	system->ref_count = 1;
	system->name = kstrdup(name, GFP_KERNEL);

	if (!system->name)
		goto out_free;

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter)
		goto out_free;

	list_add(&system->list, &event_subsystems);

	return system;

 out_free:
	kfree(system->name);
	kfree(system);
	return NULL;
}

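/*
 * Find or create the debugfs directory for a subsystem under this
 * trace_array. Existing directories just get their event count bumped;
 * new ones are created with "filter" and "enable" control files.
 */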
1221static struct dentry *
1222event_subsystem_dir(struct trace_array *tr, const char *name,
1223 struct ftrace_event_file *file, struct dentry *parent)
1224{
1225 struct ftrace_subsystem_dir *dir;
1226 struct event_subsystem *system;
1227 struct dentry *entry;
1228
1229 /* First see if we did not already create this dir */
1230 list_for_each_entry(dir, &tr->systems, list) {
1231 system = dir->subsystem;
1232 if (strcmp(system->name, name) == 0) {
1233 dir->nr_events++;
1234 file->system = dir;
1235 return dir->entry;
1236 }
Tom Zanussi8b372562009-04-28 03:04:59 -05001237 }
1238
Steven Rostedtae63b312012-05-03 23:09:03 -04001239 /* Now see if the system itself exists. */
1240 list_for_each_entry(system, &event_subsystems, list) {
1241 if (strcmp(system->name, name) == 0)
1242 break;
1243 }
1244 /* Reset system variable when not found */
1245 if (&system->list == &event_subsystems)
1246 system = NULL;
1247
1248 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1249 if (!dir)
1250 goto out_fail;
1251
1252 if (!system) {
1253 system = create_new_subsystem(name);
1254 if (!system)
1255 goto out_free;
1256 } else
1257 __get_system(system);
1258
1259 dir->entry = debugfs_create_dir(name, parent);
1260 if (!dir->entry) {
1261 pr_warning("Failed to create system directory %s\n", name);
1262 __put_system(system);
1263 goto out_free;
1264 }
1265
1266 dir->tr = tr;
1267 dir->ref_count = 1;
1268 dir->nr_events = 1;
1269 dir->subsystem = system;
1270 file->system = dir;
1271
1272 entry = debugfs_create_file("filter", 0644, dir->entry, dir,
Tom Zanussie1112b42009-03-31 00:48:49 -05001273 &ftrace_subsystem_filter_fops);
Tom Zanussi8b372562009-04-28 03:04:59 -05001274 if (!entry) {
1275 kfree(system->filter);
1276 system->filter = NULL;
Steven Rostedtae63b312012-05-03 23:09:03 -04001277 pr_warning("Could not create debugfs '%s/filter' entry\n", name);
Tom Zanussi8b372562009-04-28 03:04:59 -05001278 }
Tom Zanussie1112b42009-03-31 00:48:49 -05001279
Steven Rostedtae63b312012-05-03 23:09:03 -04001280 trace_create_file("enable", 0644, dir->entry, dir,
Frederic Weisbeckerf3f3f002009-09-24 15:27:41 +02001281 &ftrace_system_enable_fops);
Steven Rostedt8ae79a12009-05-06 22:52:15 -04001282
Steven Rostedtae63b312012-05-03 23:09:03 -04001283 list_add(&dir->list, &tr->systems);
1284
1285 return dir->entry;
1286
1287 out_free:
1288 kfree(dir);
1289 out_fail:
1290 /* Only print this message if failed on memory allocation */
1291 if (!dir || !system)
1292 pr_warning("No memory to create event subsystem %s\n",
1293 name);
1294 return NULL;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001295}
1296
Steven Rostedt1473e442009-02-24 14:15:08 -05001297static int
Steven Rostedtae63b312012-05-03 23:09:03 -04001298event_create_dir(struct dentry *parent,
1299 struct ftrace_event_file *file,
Steven Rostedt701970b2009-04-24 23:11:22 -04001300 const struct file_operations *id,
1301 const struct file_operations *enable,
1302 const struct file_operations *filter,
1303 const struct file_operations *format)
Steven Rostedt1473e442009-02-24 14:15:08 -05001304{
Steven Rostedtae63b312012-05-03 23:09:03 -04001305 struct ftrace_event_call *call = file->event_call;
1306 struct trace_array *tr = file->tr;
Steven Rostedt2e33af02010-04-22 10:35:55 -04001307 struct list_head *head;
Steven Rostedtae63b312012-05-03 23:09:03 -04001308 struct dentry *d_events;
Steven Rostedtfd994982009-02-28 02:41:25 -05001309 int ret;
Steven Rostedt1473e442009-02-24 14:15:08 -05001310
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001311 /*
1312 * If the trace point header did not define TRACE_SYSTEM
1313 * then the system would be called "TRACE_SYSTEM".
1314 */
Steven Rostedtae63b312012-05-03 23:09:03 -04001315 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
1316 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
1317 if (!d_events)
1318 return -ENOMEM;
1319 } else
1320 d_events = parent;
Steven Rostedt6ecc2d12009-02-27 21:33:02 -05001321
Steven Rostedtae63b312012-05-03 23:09:03 -04001322 file->dir = debugfs_create_dir(call->name, d_events);
1323 if (!file->dir) {
1324 pr_warning("Could not create debugfs '%s' directory\n",
1325 call->name);
Steven Rostedt1473e442009-02-24 14:15:08 -05001326 return -1;
1327 }
1328
Steven Rostedt9b637762012-05-10 15:55:43 -04001329 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
Steven Rostedtae63b312012-05-03 23:09:03 -04001330 trace_create_file("enable", 0644, file->dir, file,
Frederic Weisbeckerf3f3f002009-09-24 15:27:41 +02001331 enable);
Steven Rostedt1473e442009-02-24 14:15:08 -05001332
Steven Rostedt22392912010-04-21 12:27:06 -04001333#ifdef CONFIG_PERF_EVENTS
Steven Rostedta1d0ce82010-06-08 11:22:06 -04001334 if (call->event.type && call->class->reg)
Steven Rostedtae63b312012-05-03 23:09:03 -04001335 trace_create_file("id", 0444, file->dir, call,
Frederic Weisbeckerf3f3f002009-09-24 15:27:41 +02001336 id);
Steven Rostedt22392912010-04-21 12:27:06 -04001337#endif
Peter Zijlstra23725ae2009-03-19 20:26:13 +01001338
Li Zefanc9d932c2010-05-24 16:24:28 +08001339 /*
1340 * Other events may have the same class. Only update
1341 * the fields if they are not already defined.
1342 */
1343 head = trace_get_fields(call);
1344 if (list_empty(head)) {
1345 ret = call->class->define_fields(call);
1346 if (ret < 0) {
1347 pr_warning("Could not initialize trace point"
1348 " events/%s\n", call->name);
Steven Rostedtae63b312012-05-03 23:09:03 -04001349 return -1;
Tom Zanussicf027f62009-03-22 03:30:39 -05001350 }
1351 }
Steven Rostedtae63b312012-05-03 23:09:03 -04001352 trace_create_file("filter", 0644, file->dir, call,
Li Zefanc9d932c2010-05-24 16:24:28 +08001353 filter);
Tom Zanussicf027f62009-03-22 03:30:39 -05001354
Steven Rostedtae63b312012-05-03 23:09:03 -04001355 trace_create_file("format", 0444, file->dir, call,
Frederic Weisbeckerf3f3f002009-09-24 15:27:41 +02001356 format);
Steven Rostedtfd994982009-02-28 02:41:25 -05001357
Steven Rostedt1473e442009-02-24 14:15:08 -05001358 return 0;
1359}
1360
Steven Rostedtae63b312012-05-03 23:09:03 -04001361static void remove_subsystem(struct ftrace_subsystem_dir *dir)
1362{
1363 if (!dir)
1364 return;
1365
1366 if (!--dir->nr_events) {
1367 debugfs_remove_recursive(dir->entry);
1368 list_del(&dir->list);
1369 __put_system_dir(dir);
1370 }
1371}
1372
1373static void remove_event_from_tracers(struct ftrace_event_call *call)
1374{
1375 struct ftrace_event_file *file;
1376 struct trace_array *tr;
1377
1378 do_for_each_event_file_safe(tr, file) {
1379
1380 if (file->event_call != call)
1381 continue;
1382
1383 list_del(&file->list);
1384 debugfs_remove_recursive(file->dir);
1385 remove_subsystem(file->system);
1386 kfree(file);
1387
1388 /*
1389 * The do_for_each_event_file_safe() is
1390 * a double loop. After finding the call for this
1391 * trace_array, we use break to jump to the next
1392 * trace_array.
1393 */
1394 break;
1395 } while_for_each_event_file();
1396}
1397
Ezequiel Garcia87819152012-09-12 11:47:57 -03001398static void event_remove(struct ftrace_event_call *call)
1399{
Steven Rostedtae63b312012-05-03 23:09:03 -04001400 struct trace_array *tr;
1401 struct ftrace_event_file *file;
1402
1403 do_for_each_event_file(tr, file) {
1404 if (file->event_call != call)
1405 continue;
1406 ftrace_event_enable_disable(file, 0);
1407 /*
1408 * The do_for_each_event_file() is
1409 * a double loop. After finding the call for this
1410 * trace_array, we use break to jump to the next
1411 * trace_array.
1412 */
1413 break;
1414 } while_for_each_event_file();
1415
Ezequiel Garcia87819152012-09-12 11:47:57 -03001416 if (call->event.funcs)
1417 __unregister_ftrace_event(&call->event);
Steven Rostedtae63b312012-05-03 23:09:03 -04001418 remove_event_from_tracers(call);
Ezequiel Garcia87819152012-09-12 11:47:57 -03001419 list_del(&call->list);
1420}
1421
1422static int event_init(struct ftrace_event_call *call)
1423{
1424 int ret = 0;
1425
1426 if (WARN_ON(!call->name))
1427 return -EINVAL;
1428
1429 if (call->class->raw_init) {
1430 ret = call->class->raw_init(call);
1431 if (ret < 0 && ret != -ENOSYS)
1432 pr_warn("Could not initialize trace events/%s\n",
1433 call->name);
1434 }
1435
1436 return ret;
1437}
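
/*
 * For built-in events, ->raw_init() is normally trace_event_raw_init(),
 * which hands the event to the output layer. A rough sketch of such a
 * callback (the name is hypothetical):
 *
 *	static int my_raw_init(struct ftrace_event_call *call)
 *	{
 *		if (!register_ftrace_event(&call->event))
 *			return -ENODEV;
 *		return 0;
 *	}
 *
 * -ENOSYS is tolerated in event_init() above, presumably so that events
 * such as syscall events with missing metadata can decline to initialize
 * without triggering the warning.
 */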
1438
Li Zefan67ead0a2010-05-24 16:25:13 +08001439static int
Steven Rostedtae63b312012-05-03 23:09:03 -04001440__register_event(struct ftrace_event_call *call, struct module *mod)
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001441{
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001442 int ret;
Steven Rostedt6d723732009-04-10 14:53:50 -04001443
Ezequiel Garcia87819152012-09-12 11:47:57 -03001444 ret = event_init(call);
1445 if (ret < 0)
1446 return ret;
Steven Rostedt701970b2009-04-24 23:11:22 -04001447
Steven Rostedtae63b312012-05-03 23:09:03 -04001448 list_add(&call->list, &ftrace_events);
Li Zefan67ead0a2010-05-24 16:25:13 +08001449 call->mod = mod;
Masami Hiramatsu88f70d72009-09-25 11:20:54 -07001450
Steven Rostedtae63b312012-05-03 23:09:03 -04001451 return 0;
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001452}
1453
Steven Rostedtae63b312012-05-03 23:09:03 -04001454/* Add an event to a trace directory */
1455static int
1456__trace_add_new_event(struct ftrace_event_call *call,
1457 struct trace_array *tr,
1458 const struct file_operations *id,
1459 const struct file_operations *enable,
1460 const struct file_operations *filter,
1461 const struct file_operations *format)
1462{
1463 struct ftrace_event_file *file;
1464
1465 file = kzalloc(sizeof(*file), GFP_KERNEL);
1466 if (!file)
1467 return -ENOMEM;
1468
1469 file->event_call = call;
1470 file->tr = tr;
1471 list_add(&file->list, &tr->events);
1472
1473 return event_create_dir(tr->event_dir, file, id, enable, filter, format);
1474}
1475
Steven Rostedt77248222013-02-27 16:28:06 -05001476/*
1477 * Just create a descriptor for early init. A descriptor is required
1478 * for enabling events at boot. We want to enable events before
1479 * the filesystem is initialized.
1480 */
1481static __init int
1482__trace_early_add_new_event(struct ftrace_event_call *call,
1483 struct trace_array *tr)
1484{
1485 struct ftrace_event_file *file;
1486
1487 file = kzalloc(sizeof(*file), GFP_KERNEL);
1488 if (!file)
1489 return -ENOMEM;
1490
1491 file->event_call = call;
1492 file->tr = tr;
1493 list_add(&file->list, &tr->events);
1494
1495 return 0;
1496}
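
/*
 * Note: no debugfs directory is created here; the matching files are
 * attached later by __trace_early_add_event_dirs() once debugfs is up.
 */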
1497
Steven Rostedtae63b312012-05-03 23:09:03 -04001498struct ftrace_module_file_ops;
1499static void __add_event_to_tracers(struct ftrace_event_call *call,
1500 struct ftrace_module_file_ops *file_ops);
1501
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001502/* Add an additional event_call dynamically */
1503int trace_add_event_call(struct ftrace_event_call *call)
1504{
1505 int ret;
1506 mutex_lock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001507
1508 ret = __register_event(call, NULL);
1509 if (ret >= 0)
1510 __add_event_to_tracers(call, NULL);
1511
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001512 mutex_unlock(&event_mutex);
1513 return ret;
1514}
Steven Rostedt701970b2009-04-24 23:11:22 -04001515
Masami Hiramatsu4fead8e2009-09-14 16:49:12 -04001516/*
1517 * Must be called with both event_mutex and trace_event_mutex held.
1518 */
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001519static void __trace_remove_event_call(struct ftrace_event_call *call)
1520{
Ezequiel Garcia87819152012-09-12 11:47:57 -03001521 event_remove(call);
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001522 trace_destroy_fields(call);
1523 destroy_preds(call);
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001524}
1525
1526/* Remove an event_call */
1527void trace_remove_event_call(struct ftrace_event_call *call)
1528{
1529 mutex_lock(&event_mutex);
Masami Hiramatsu4fead8e2009-09-14 16:49:12 -04001530 down_write(&trace_event_mutex);
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001531 __trace_remove_event_call(call);
Masami Hiramatsu4fead8e2009-09-14 16:49:12 -04001532 up_write(&trace_event_mutex);
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001533 mutex_unlock(&event_mutex);
1534}
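
/*
 * Rough usage sketch for a dynamic event provider (for instance the
 * kprobe event code); the call structure here is hypothetical:
 *
 *	ret = trace_add_event_call(&my_event_call);
 *	if (ret)
 *		return ret;
 *	...
 *	trace_remove_event_call(&my_event_call);
 */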
1535
1536#define for_each_event(event, start, end) \
1537 for (event = start; \
1538 (unsigned long)event < (unsigned long)end; \
1539 event++)
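
/*
 * Illustration: 'start' and 'end' bound an array of event call pointers
 * (such as a module's trace_events section), so a typical walk is:
 *
 *	struct ftrace_event_call **call;
 *
 *	for_each_event(call, mod->trace_events,
 *		       mod->trace_events + mod->num_trace_events)
 *		__register_event(*call, mod);
 */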
1540
1541#ifdef CONFIG_MODULES
1542
1543static LIST_HEAD(ftrace_module_file_list);
1544
1545/*
1546 * Modules must own their file_operations so that module
1547 * reference counting works correctly.
1548 */
1549struct ftrace_module_file_ops {
1550 struct list_head list;
1551 struct module *mod;
1552 struct file_operations id;
1553 struct file_operations enable;
1554 struct file_operations format;
1555 struct file_operations filter;
1556};
1557
Steven Rostedtae63b312012-05-03 23:09:03 -04001558static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
1559{
1560 struct ftrace_module_file_ops *file_ops;
1561
1562 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1563 if (file_ops->mod == mod)
1564 return file_ops;
1565 }
1566 return NULL;
1567}
1568
Steven Rostedt701970b2009-04-24 23:11:22 -04001569static struct ftrace_module_file_ops *
1570trace_create_file_ops(struct module *mod)
1571{
1572 struct ftrace_module_file_ops *file_ops;
1573
1574 /*
1575 * This is a bit of a PITA. To allow for correct reference
1576 * counting, modules must "own" their file_operations.
1577 * To do this, we allocate the file operations that will be
1578 * used in the event directory.
1579 */
1580
1581 file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
1582 if (!file_ops)
1583 return NULL;
1584
1585 file_ops->mod = mod;
1586
1587 file_ops->id = ftrace_event_id_fops;
1588 file_ops->id.owner = mod;
1589
1590 file_ops->enable = ftrace_enable_fops;
1591 file_ops->enable.owner = mod;
1592
1593 file_ops->filter = ftrace_event_filter_fops;
1594 file_ops->filter.owner = mod;
1595
1596 file_ops->format = ftrace_event_format_fops;
1597 file_ops->format.owner = mod;
1598
1599 list_add(&file_ops->list, &ftrace_module_file_list);
1600
1601 return file_ops;
1602}
1603
Steven Rostedt6d723732009-04-10 14:53:50 -04001604static void trace_module_add_events(struct module *mod)
1605{
Steven Rostedt701970b2009-04-24 23:11:22 -04001606 struct ftrace_module_file_ops *file_ops = NULL;
Steven Rostedte4a9ea52011-01-27 09:15:30 -05001607 struct ftrace_event_call **call, **start, **end;
Steven Rostedt6d723732009-04-10 14:53:50 -04001608
1609 start = mod->trace_events;
1610 end = mod->trace_events + mod->num_trace_events;
1611
1612 if (start == end)
1613 return;
1614
Li Zefan67ead0a2010-05-24 16:25:13 +08001615 file_ops = trace_create_file_ops(mod);
1616 if (!file_ops)
Steven Rostedt6d723732009-04-10 14:53:50 -04001617 return;
1618
1619 for_each_event(call, start, end) {
Steven Rostedtae63b312012-05-03 23:09:03 -04001620 __register_event(*call, mod);
1621 __add_event_to_tracers(*call, file_ops);
Steven Rostedt6d723732009-04-10 14:53:50 -04001622 }
1623}
1624
1625static void trace_module_remove_events(struct module *mod)
1626{
Steven Rostedt701970b2009-04-24 23:11:22 -04001627 struct ftrace_module_file_ops *file_ops;
Steven Rostedt6d723732009-04-10 14:53:50 -04001628 struct ftrace_event_call *call, *p;
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001629 bool found = false;
Steven Rostedt6d723732009-04-10 14:53:50 -04001630
Steven Rostedt110bf2b2009-06-09 17:29:07 -04001631 down_write(&trace_event_mutex);
Steven Rostedt6d723732009-04-10 14:53:50 -04001632 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1633 if (call->mod == mod) {
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001634 found = true;
Masami Hiramatsubd1a5c82009-08-13 16:34:53 -04001635 __trace_remove_event_call(call);
Steven Rostedt6d723732009-04-10 14:53:50 -04001636 }
1637 }
Steven Rostedt701970b2009-04-24 23:11:22 -04001638
1639 /* Now free the file_operations */
1640 list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
1641 if (file_ops->mod == mod)
1642 break;
1643 }
1644 if (&file_ops->list != &ftrace_module_file_list) {
1645 list_del(&file_ops->list);
1646 kfree(file_ops);
1647 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001648
1649 /*
1650 * It is safest to reset the ring buffer if the module being unloaded
1651 * registered any events.
1652 */
1653 if (found)
1654 tracing_reset_current_online_cpus();
Steven Rostedt110bf2b2009-06-09 17:29:07 -04001655 up_write(&trace_event_mutex);
Steven Rostedt6d723732009-04-10 14:53:50 -04001656}
1657
Steven Rostedt61f919a2009-04-14 18:22:32 -04001658static int trace_module_notify(struct notifier_block *self,
1659 unsigned long val, void *data)
Steven Rostedt6d723732009-04-10 14:53:50 -04001660{
1661 struct module *mod = data;
1662
1663 mutex_lock(&event_mutex);
1664 switch (val) {
1665 case MODULE_STATE_COMING:
1666 trace_module_add_events(mod);
1667 break;
1668 case MODULE_STATE_GOING:
1669 trace_module_remove_events(mod);
1670 break;
1671 }
1672 mutex_unlock(&event_mutex);
1673
1674 return 0;
1675}
Steven Rostedt61f919a2009-04-14 18:22:32 -04001676#else
Steven Rostedtae63b312012-05-03 23:09:03 -04001677static struct ftrace_module_file_ops *find_ftrace_file_ops(struct module *mod)
1678{
1679 return NULL;
1680}
Steven Rostedt61f919a2009-04-14 18:22:32 -04001681static int trace_module_notify(struct notifier_block *self,
1682 unsigned long val, void *data)
1683{
1684 return 0;
1685}
1686#endif /* CONFIG_MODULES */
Steven Rostedt6d723732009-04-10 14:53:50 -04001687
Steven Rostedtae63b312012-05-03 23:09:03 -04001688/* Create a new event directory structure for a trace directory. */
1689static void
1690__trace_add_event_dirs(struct trace_array *tr)
1691{
1692 struct ftrace_module_file_ops *file_ops = NULL;
1693 struct ftrace_event_call *call;
1694 int ret;
1695
1696 list_for_each_entry(call, &ftrace_events, list) {
1697 if (call->mod) {
1698 /*
1699 * Directories for events defined by modules need to
1700 * keep module ref counts when opened (as we don't
1701 * want the module to disappear when reading one
1702 * of these files). The file_ops keep track of
1703 * the module ref count.
1704 *
1705 * As event_calls are added in groups by module,
1706 * when we find one file_ops, we don't need to search for
1707 * each call in that module, as the rest should be the
1708 * same. Only search for a new one if the last one did
1709 * not match.
1710 */
1711 if (!file_ops || call->mod != file_ops->mod)
1712 file_ops = find_ftrace_file_ops(call->mod);
1713 if (!file_ops)
1714 continue; /* Warn? */
1715 ret = __trace_add_new_event(call, tr,
1716 &file_ops->id, &file_ops->enable,
1717 &file_ops->filter, &file_ops->format);
1718 if (ret < 0)
1719 pr_warning("Could not create directory for event %s\n",
1720 call->name);
1721 continue;
1722 }
1723 ret = __trace_add_new_event(call, tr,
1724 &ftrace_event_id_fops,
1725 &ftrace_enable_fops,
1726 &ftrace_event_filter_fops,
1727 &ftrace_event_format_fops);
1728 if (ret < 0)
1729 pr_warning("Could not create directory for event %s\n",
1730 call->name);
1731 }
1732}
1733
Steven Rostedt77248222013-02-27 16:28:06 -05001734/*
1735 * The top level array has already had its ftrace_event_file
1736 * descriptors created in order to allow for early events to
1737 * be recorded. This function is called after debugfs has been
1738 * initialized, and we now have to create the files associated
1739 * with those events.
1740 */
1741static __init void
1742__trace_early_add_event_dirs(struct trace_array *tr)
1743{
1744 struct ftrace_event_file *file;
1745 int ret;
1746
1748 list_for_each_entry(file, &tr->events, list) {
1749 ret = event_create_dir(tr->event_dir, file,
1750 &ftrace_event_id_fops,
1751 &ftrace_enable_fops,
1752 &ftrace_event_filter_fops,
1753 &ftrace_event_format_fops);
1754 if (ret < 0)
1755 pr_warning("Could not create directory for event %s\n",
1756 file->event_call->name);
1757 }
1758}
1759
1760/*
1761 * For early boot up, the top trace array needs to have
1762 * a list of events that can be enabled. This must be done before
1763 * the filesystem is set up in order to allow events to be traced
1764 * early.
1765 */
1766static __init void
1767__trace_early_add_events(struct trace_array *tr)
1768{
1769 struct ftrace_event_call *call;
1770 int ret;
1771
1772 list_for_each_entry(call, &ftrace_events, list) {
1773 /* Early boot up should not have any modules loaded */
1774 if (WARN_ON_ONCE(call->mod))
1775 continue;
1776
1777 ret = __trace_early_add_new_event(call, tr);
1778 if (ret < 0)
1779 pr_warning("Could not create early event %s\n",
1780 call->name);
1781 }
1782}
1783
Steven Rostedt0c8916c2012-08-07 16:14:16 -04001784/* Remove the event directory structure for a trace directory. */
1785static void
1786__trace_remove_event_dirs(struct trace_array *tr)
1787{
1788 struct ftrace_event_file *file, *next;
1789
1790 list_for_each_entry_safe(file, next, &tr->events, list) {
1791 list_del(&file->list);
1792 debugfs_remove_recursive(file->dir);
1793 remove_subsystem(file->system);
1794 kfree(file);
1795 }
1796}
1797
Steven Rostedtae63b312012-05-03 23:09:03 -04001798static void
1799__add_event_to_tracers(struct ftrace_event_call *call,
1800 struct ftrace_module_file_ops *file_ops)
1801{
1802 struct trace_array *tr;
1803
1804 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1805 if (file_ops)
1806 __trace_add_new_event(call, tr,
1807 &file_ops->id, &file_ops->enable,
1808 &file_ops->filter, &file_ops->format);
1809 else
1810 __trace_add_new_event(call, tr,
1811 &ftrace_event_id_fops,
1812 &ftrace_enable_fops,
1813 &ftrace_event_filter_fops,
1814 &ftrace_event_format_fops);
1815 }
1816}
1817
Steven Rostedtec827c72009-09-14 10:50:23 -04001818static struct notifier_block trace_module_nb = {
Steven Rostedt6d723732009-04-10 14:53:50 -04001819 .notifier_call = trace_module_notify,
1820 .priority = 0,
1821};
1822
Steven Rostedte4a9ea52011-01-27 09:15:30 -05001823extern struct ftrace_event_call *__start_ftrace_events[];
1824extern struct ftrace_event_call *__stop_ftrace_events[];
Steven Rostedta59fd602009-04-10 13:52:20 -04001825
Li Zefan020e5f82009-07-01 10:47:05 +08001826static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1827
1828static __init int setup_trace_event(char *str)
1829{
1830 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1831 ring_buffer_expanded = 1;
1832 tracing_selftest_disabled = 1;
1833
1834 return 1;
1835}
1836__setup("trace_event=", setup_trace_event);
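
/*
 * Command line example, assuming the usual system:event syntax accepted
 * by ftrace_set_clr_event() and the comma-separated parsing done at boot:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */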
1837
Steven Rostedt77248222013-02-27 16:28:06 -05001838/* Expects to have event_mutex held when called */
1839static int
1840create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
Steven Rostedtae63b312012-05-03 23:09:03 -04001841{
1842 struct dentry *d_events;
1843 struct dentry *entry;
1844
1845 entry = debugfs_create_file("set_event", 0644, parent,
1846 tr, &ftrace_set_event_fops);
1847 if (!entry) {
1848 pr_warning("Could not create debugfs 'set_event' entry\n");
1849 return -ENOMEM;
1850 }
1851
1852 d_events = debugfs_create_dir("events", parent);
Steven Rostedt277ba042012-08-03 16:10:49 -04001853 if (!d_events) {
Steven Rostedtae63b312012-05-03 23:09:03 -04001854 pr_warning("Could not create debugfs 'events' directory\n");
Steven Rostedt277ba042012-08-03 16:10:49 -04001855 return -ENOMEM;
1856 }
Steven Rostedtae63b312012-05-03 23:09:03 -04001857
1858 /* ring buffer internal formats */
1859 trace_create_file("header_page", 0444, d_events,
1860 ring_buffer_print_page_header,
1861 &ftrace_show_header_fops);
1862
1863 trace_create_file("header_event", 0444, d_events,
1864 ring_buffer_print_entry_header,
1865 &ftrace_show_header_fops);
1866
1867 trace_create_file("enable", 0644, d_events,
1868 tr, &ftrace_tr_enable_fops);
1869
1870 tr->event_dir = d_events;
Steven Rostedt77248222013-02-27 16:28:06 -05001871
1872 return 0;
1873}
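
/*
 * Layout created above, under @parent (sketch):
 *
 *	parent/set_event
 *	parent/events/header_page
 *	parent/events/header_event
 *	parent/events/enable
 *
 * The per-event directories under events/ are added later via
 * event_create_dir().
 */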
1874
1875/**
1876 * event_trace_add_tracer - add an instance of a trace_array to events
1877 * @parent: The parent dentry to place the files/directories for events in
1878 * @tr: The trace array associated with these events
1879 *
1880 * When a new instance is created, it needs to set up its events
1881 * directory, as well as other files associated with events. It also
1882 * creates the event hierarchy in the @parent/events directory.
1883 *
1884 * Returns 0 on success.
1885 */
1886int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
1887{
1888 int ret;
1889
1890 mutex_lock(&event_mutex);
1891
1892 ret = create_event_toplevel_files(parent, tr);
1893 if (ret)
1894 goto out_unlock;
1895
Steven Rostedt277ba042012-08-03 16:10:49 -04001896 down_write(&trace_event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001897 __trace_add_event_dirs(tr);
Steven Rostedt277ba042012-08-03 16:10:49 -04001898 up_write(&trace_event_mutex);
1899
Steven Rostedt77248222013-02-27 16:28:06 -05001900 out_unlock:
Steven Rostedt277ba042012-08-03 16:10:49 -04001901 mutex_unlock(&event_mutex);
Steven Rostedtae63b312012-05-03 23:09:03 -04001902
Steven Rostedt77248222013-02-27 16:28:06 -05001903 return ret;
1904}
1905
1906/*
1907 * The top trace array already had its ftrace_event_file descriptors
1908 * created. Now the debugfs files themselves need to be created.
1909 */
1910static __init int
1911early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
1912{
1913 int ret;
1914
1915 mutex_lock(&event_mutex);
1916
1917 ret = create_event_toplevel_files(parent, tr);
1918 if (ret)
1919 goto out_unlock;
1920
1921 down_write(&trace_event_mutex);
1922 __trace_early_add_event_dirs(tr);
1923 up_write(&trace_event_mutex);
1924
1925 out_unlock:
1926 mutex_unlock(&event_mutex);
1927
1928 return ret;
Steven Rostedtae63b312012-05-03 23:09:03 -04001929}
1930
Steven Rostedt0c8916c2012-08-07 16:14:16 -04001931int event_trace_del_tracer(struct trace_array *tr)
1932{
1933 /* Disable any running events */
1934 __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
1935
1936 mutex_lock(&event_mutex);
1937
1938 down_write(&trace_event_mutex);
1939 __trace_remove_event_dirs(tr);
1940 debugfs_remove_recursive(tr->event_dir);
1941 up_write(&trace_event_mutex);
1942
1943 tr->event_dir = NULL;
1944
1945 mutex_unlock(&event_mutex);
1946
1947 return 0;
1948}
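
/*
 * Expected pairing when a trace instance is created and removed; the
 * caller (assumed to live in the instance code in trace.c) is not shown:
 *
 *	ret = event_trace_add_tracer(instance_dentry, tr);
 *	...
 *	event_trace_del_tracer(tr);
 */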
1949
Ezequiel Garcia87819152012-09-12 11:47:57 -03001950static __init int event_trace_enable(void)
1951{
Steven Rostedtae63b312012-05-03 23:09:03 -04001952 struct trace_array *tr = top_trace_array();
Ezequiel Garcia87819152012-09-12 11:47:57 -03001953 struct ftrace_event_call **iter, *call;
1954 char *buf = bootup_event_buf;
1955 char *token;
1956 int ret;
1957
1958 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
1959
1960 call = *iter;
1961 ret = event_init(call);
1962 if (!ret)
1963 list_add(&call->list, &ftrace_events);
1964 }
1965
Steven Rostedt77248222013-02-27 16:28:06 -05001966 /*
1967 * We need the top trace array to have a working set of trace
1968 * points at early init, before the debug files and directories
1969 * are created. Create the file entries now, and attach them
1970 * to the actual file dentries later.
1971 */
1972 __trace_early_add_events(tr);
1973
Ezequiel Garcia87819152012-09-12 11:47:57 -03001974 while (true) {
1975 token = strsep(&buf, ",");
1976
1977 if (!token)
1978 break;
1979 if (!*token)
1980 continue;
1981
Steven Rostedtae63b312012-05-03 23:09:03 -04001982 ret = ftrace_set_clr_event(tr, token, 1);
Ezequiel Garcia87819152012-09-12 11:47:57 -03001983 if (ret)
1984 pr_warn("Failed to enable trace event: %s\n", token);
1985 }
Steven Rostedt81698832012-10-11 10:15:05 -04001986
1987 trace_printk_start_comm();
1988
Ezequiel Garcia87819152012-09-12 11:47:57 -03001989 return 0;
1990}
1991
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001992static __init int event_trace_init(void)
1993{
Steven Rostedtae63b312012-05-03 23:09:03 -04001994 struct trace_array *tr;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001995 struct dentry *d_tracer;
1996 struct dentry *entry;
Steven Rostedt6d723732009-04-10 14:53:50 -04001997 int ret;
Steven Rostedtb77e38a2009-02-24 10:21:36 -05001998
Steven Rostedtae63b312012-05-03 23:09:03 -04001999 tr = top_trace_array();
2000
Steven Rostedtb77e38a2009-02-24 10:21:36 -05002001 d_tracer = tracing_init_dentry();
2002 if (!d_tracer)
2003 return 0;
2004
Steven Rostedt2314c4a2009-03-10 12:04:02 -04002005 entry = debugfs_create_file("available_events", 0444, d_tracer,
Steven Rostedtae63b312012-05-03 23:09:03 -04002006 tr, &ftrace_avail_fops);
Steven Rostedt2314c4a2009-03-10 12:04:02 -04002007 if (!entry)
2008 pr_warning("Could not create debugfs "
2009 "'available_events' entry\n");
2010
Li Zefan8728fe52010-05-24 16:22:49 +08002011 if (trace_define_common_fields())
2012 pr_warning("tracing: Failed to allocate common fields");
2013
Steven Rostedt77248222013-02-27 16:28:06 -05002014 ret = early_event_add_tracer(d_tracer, tr);
Steven Rostedtae63b312012-05-03 23:09:03 -04002015 if (ret)
2016 return ret;
Li Zefan020e5f82009-07-01 10:47:05 +08002017
Steven Rostedt6d723732009-04-10 14:53:50 -04002018 ret = register_module_notifier(&trace_module_nb);
Ming Lei55379372009-05-18 23:04:46 +08002019 if (ret)
Steven Rostedt6d723732009-04-10 14:53:50 -04002020 pr_warning("Failed to register trace events module notifier\n");
2021
Steven Rostedtb77e38a2009-02-24 10:21:36 -05002022 return 0;
2023}
Ezequiel Garcia87819152012-09-12 11:47:57 -03002024core_initcall(event_trace_enable);
Steven Rostedtb77e38a2009-02-24 10:21:36 -05002025fs_initcall(event_trace_init);
Steven Rostedte6187002009-04-15 13:36:40 -04002026
2027#ifdef CONFIG_FTRACE_STARTUP_TEST
2028
2029static DEFINE_SPINLOCK(test_spinlock);
2030static DEFINE_SPINLOCK(test_spinlock_irq);
2031static DEFINE_MUTEX(test_mutex);
2032
2033static __init void test_work(struct work_struct *dummy)
2034{
2035 spin_lock(&test_spinlock);
2036 spin_lock_irq(&test_spinlock_irq);
2037 udelay(1);
2038 spin_unlock_irq(&test_spinlock_irq);
2039 spin_unlock(&test_spinlock);
2040
2041 mutex_lock(&test_mutex);
2042 msleep(1);
2043 mutex_unlock(&test_mutex);
2044}
2045
2046static __init int event_test_thread(void *unused)
2047{
2048 void *test_malloc;
2049
2050 test_malloc = kmalloc(1234, GFP_KERNEL);
2051 if (!test_malloc)
2052 pr_info("failed to kmalloc\n");
2053
2054 schedule_on_each_cpu(test_work);
2055
2056 kfree(test_malloc);
2057
2058 set_current_state(TASK_INTERRUPTIBLE);
2059 while (!kthread_should_stop())
2060 schedule();
2061
2062 return 0;
2063}
2064
2065/*
2066 * Do various things that may trigger events.
2067 */
2068static __init void event_test_stuff(void)
2069{
2070 struct task_struct *test_thread;
2071
2072 test_thread = kthread_run(event_test_thread, NULL, "test-events");
2073 msleep(1);
2074 kthread_stop(test_thread);
2075}
2076
2077/*
2078 * For every trace event defined, we will test each trace point separately,
2079 * and then by groups, and finally all trace points.
2080 */
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002081static __init void event_trace_self_tests(void)
Steven Rostedte6187002009-04-15 13:36:40 -04002082{
Steven Rostedtae63b312012-05-03 23:09:03 -04002083 struct ftrace_subsystem_dir *dir;
2084 struct ftrace_event_file *file;
Steven Rostedte6187002009-04-15 13:36:40 -04002085 struct ftrace_event_call *call;
2086 struct event_subsystem *system;
Steven Rostedtae63b312012-05-03 23:09:03 -04002087 struct trace_array *tr;
Steven Rostedte6187002009-04-15 13:36:40 -04002088 int ret;
2089
Steven Rostedtae63b312012-05-03 23:09:03 -04002090 tr = top_trace_array();
2091
Steven Rostedte6187002009-04-15 13:36:40 -04002092 pr_info("Running tests on trace events:\n");
2093
Steven Rostedtae63b312012-05-03 23:09:03 -04002094 list_for_each_entry(file, &tr->events, list) {
2095
2096 call = file->event_call;
Steven Rostedte6187002009-04-15 13:36:40 -04002097
Steven Rostedt22392912010-04-21 12:27:06 -04002098 /* Only test those that have a probe */
2099 if (!call->class || !call->class->probe)
Steven Rostedte6187002009-04-15 13:36:40 -04002100 continue;
2101
Steven Rostedt1f5a6b42009-09-14 11:58:24 -04002102/*
2103 * Testing syscall events here is pretty useless, but
2104 * we still do it if configured, even though it is time consuming.
2105 * What we really need is a user thread to perform the
2106 * syscalls as we test.
2107 */
2108#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
Steven Rostedt8f082012010-04-20 10:47:33 -04002109 if (call->class->system &&
2110 strcmp(call->class->system, "syscalls") == 0)
Steven Rostedt1f5a6b42009-09-14 11:58:24 -04002111 continue;
2112#endif
2113
Steven Rostedte6187002009-04-15 13:36:40 -04002114 pr_info("Testing event %s: ", call->name);
2115
2116 /*
2117 * If an event is already enabled, someone is using
2118 * it and the self test should not be on.
2119 */
Steven Rostedtae63b312012-05-03 23:09:03 -04002120 if (file->flags & FTRACE_EVENT_FL_ENABLED) {
Steven Rostedte6187002009-04-15 13:36:40 -04002121 pr_warning("Enabled event during self test!\n");
2122 WARN_ON_ONCE(1);
2123 continue;
2124 }
2125
Steven Rostedtae63b312012-05-03 23:09:03 -04002126 ftrace_event_enable_disable(file, 1);
Steven Rostedte6187002009-04-15 13:36:40 -04002127 event_test_stuff();
Steven Rostedtae63b312012-05-03 23:09:03 -04002128 ftrace_event_enable_disable(file, 0);
Steven Rostedte6187002009-04-15 13:36:40 -04002129
2130 pr_cont("OK\n");
2131 }
2132
2133 /* Now test at the sub system level */
2134
2135 pr_info("Running tests on trace event systems:\n");
2136
Steven Rostedtae63b312012-05-03 23:09:03 -04002137 list_for_each_entry(dir, &tr->systems, list) {
2138
2139 system = dir->subsystem;
Steven Rostedte6187002009-04-15 13:36:40 -04002140
2141 /* the ftrace system is special, skip it */
2142 if (strcmp(system->name, "ftrace") == 0)
2143 continue;
2144
2145 pr_info("Testing event system %s: ", system->name);
2146
Steven Rostedtae63b312012-05-03 23:09:03 -04002147 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
Steven Rostedte6187002009-04-15 13:36:40 -04002148 if (WARN_ON_ONCE(ret)) {
2149 pr_warning("error enabling system %s\n",
2150 system->name);
2151 continue;
2152 }
2153
2154 event_test_stuff();
2155
Steven Rostedtae63b312012-05-03 23:09:03 -04002156 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
Yuanhan Liu76bab1b2012-08-27 15:13:45 +08002157 if (WARN_ON_ONCE(ret)) {
Steven Rostedte6187002009-04-15 13:36:40 -04002158 pr_warning("error disabling system %s\n",
2159 system->name);
Yuanhan Liu76bab1b2012-08-27 15:13:45 +08002160 continue;
2161 }
Steven Rostedte6187002009-04-15 13:36:40 -04002162
2163 pr_cont("OK\n");
2164 }
2165
2166 /* Test with all events enabled */
2167
2168 pr_info("Running tests on all trace events:\n");
2169 pr_info("Testing all events: ");
2170
Steven Rostedtae63b312012-05-03 23:09:03 -04002171 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
Steven Rostedte6187002009-04-15 13:36:40 -04002172 if (WARN_ON_ONCE(ret)) {
Steven Rostedte6187002009-04-15 13:36:40 -04002173 pr_warning("error enabling all events\n");
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002174 return;
Steven Rostedte6187002009-04-15 13:36:40 -04002175 }
2176
2177 event_test_stuff();
2178
2179 /* reset sysname */
Steven Rostedtae63b312012-05-03 23:09:03 -04002180 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
Steven Rostedte6187002009-04-15 13:36:40 -04002181 if (WARN_ON_ONCE(ret)) {
2182 pr_warning("error disabling all events\n");
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002183 return;
Steven Rostedte6187002009-04-15 13:36:40 -04002184 }
2185
2186 pr_cont("OK\n");
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002187}
2188
2189#ifdef CONFIG_FUNCTION_TRACER
2190
Tejun Heo245b2e72009-06-24 15:13:48 +09002191static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002192
2193static void
Steven Rostedt2f5f6ad2011-08-08 16:57:47 -04002194function_test_events_call(unsigned long ip, unsigned long parent_ip,
Steven Rostedta1e2e312011-08-09 12:50:46 -04002195 struct ftrace_ops *op, struct pt_regs *pt_regs)
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002196{
2197 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002198 struct ring_buffer *buffer;
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002199 struct ftrace_entry *entry;
2200 unsigned long flags;
2201 long disabled;
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002202 int cpu;
2203 int pc;
2204
2205 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002206 preempt_disable_notrace();
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002207 cpu = raw_smp_processor_id();
Tejun Heo245b2e72009-06-24 15:13:48 +09002208 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002209
2210 if (disabled != 1)
2211 goto out;
2212
2213 local_save_flags(flags);
2214
Steven Rostedte77405a2009-09-02 14:17:06 -04002215 event = trace_current_buffer_lock_reserve(&buffer,
2216 TRACE_FN, sizeof(*entry),
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002217 flags, pc);
2218 if (!event)
2219 goto out;
2220 entry = ring_buffer_event_data(event);
2221 entry->ip = ip;
2222 entry->parent_ip = parent_ip;
2223
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04002224 trace_buffer_unlock_commit(buffer, event, flags, pc);
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002225
2226 out:
Tejun Heo245b2e72009-06-24 15:13:48 +09002227 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
Steven Rostedt5168ae52010-06-03 09:36:50 -04002228 preempt_enable_notrace();
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002229}
2230
2231static struct ftrace_ops trace_ops __initdata =
2232{
2233 .func = function_test_events_call,
Steven Rostedt47409742012-07-20 11:04:44 -04002234 .flags = FTRACE_OPS_FL_RECURSION_SAFE,
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002235};
2236
2237static __init void event_trace_self_test_with_function(void)
2238{
Steven Rostedt17bb6152011-05-23 15:27:46 -04002239 int ret;
2240 ret = register_ftrace_function(&trace_ops);
2241 if (WARN_ON(ret < 0)) {
2242 pr_info("Failed to enable function tracer for event tests\n");
2243 return;
2244 }
Steven Rostedt9ea21c12009-04-16 12:15:44 -04002245 pr_info("Running tests again, along with the function tracer\n");
2246 event_trace_self_tests();
2247 unregister_ftrace_function(&trace_ops);
2248}
2249#else
2250static __init void event_trace_self_test_with_function(void)
2251{
2252}
2253#endif
2254
2255static __init int event_trace_self_tests_init(void)
2256{
Li Zefan020e5f82009-07-01 10:47:05 +08002257 if (!tracing_selftest_disabled) {
2258 event_trace_self_tests();
2259 event_trace_self_test_with_function();
2260 }
Steven Rostedte6187002009-04-15 13:36:40 -04002261
2262 return 0;
2263}
2264
Steven Rostedt28d20e22009-04-20 12:12:44 -04002265late_initcall(event_trace_self_tests_init);
Steven Rostedte6187002009-04-15 13:36:40 -04002266
2267#endif