tracing/filter: Swap entire filter of events
kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);

char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);

LIST_HEAD(ftrace_events);
LIST_HEAD(ftrace_common_fields);

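/*
 * Return the list of fields for an event. Most classes keep their
 * fields directly in class->fields; classes that share field storage
 * (e.g. the syscall events) provide a get_fields() callback instead.
 */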
struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

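/*
 * Allocate and fill a struct ftrace_event_field and link it into
 * @head. The name and type strings are duplicated, so callers may
 * pass temporary buffers. Returns 0 on success or -ENOMEM.
 */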
static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;

err:
        if (field)
                kfree(field->name);
        kfree(field);

        return -ENOMEM;
}

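/*
 * trace_define_field - register a field with an event's field list,
 * making it show up in the event's "format" file and usable in
 * filters. With FILTER_OTHER the filter type is derived from the
 * type string.
 */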
int trace_define_field(struct ftrace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

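/*
 * The common_* fields mirror the layout of struct trace_entry, which
 * prefixes every event record. The helper macro below derives the
 * offset, size and signedness from a local 'ent' instance; for
 * example, __common_field(int, pid) is roughly equivalent to:
 *
 *      __trace_define_field(&ftrace_common_fields, "int", "common_pid",
 *                           offsetof(struct trace_entry, pid),
 *                           sizeof(ent.pid),
 *                           is_signed_type(int), FILTER_OTHER);
 */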
#define __common_field(type, item)                                      \
        ret = __trace_define_field(&ftrace_common_fields, #type,        \
                                   "common_" #item,                     \
                                   offsetof(typeof(ent), item),         \
                                   sizeof(ent.item),                    \
                                   is_signed_type(type), FILTER_OTHER); \
        if (ret)                                                        \
                return ret;

static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);
        __common_field(int, lock_depth);

        return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kfree(field->type);
                kfree(field->name);
                kfree(field);
        }
}

int trace_event_raw_init(struct ftrace_event_call *call)
{
        int id;

        id = register_ftrace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

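/*
 * Default .reg() callback for event classes: map the TRACE_REG_*
 * requests onto tracepoint probe (un)registration, using the class's
 * normal or perf probe as appropriate.
 */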
int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
{
        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->name,
                                                 call->class->probe,
                                                 call);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->name,
                                            call->class->probe,
                                            call);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->name,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->name,
                                            call->class->perf_probe,
                                            call);
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ftrace_event_reg);

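/*
 * Start or stop recording of the pid->comm mapping for every event
 * that is currently enabled, so traces can resolve task names.
 */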
void trace_event_enable_cmd_record(bool enable)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!(call->flags & TRACE_EVENT_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
                } else {
                        tracing_stop_cmdline_record();
                        call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
                }
        }
        mutex_unlock(&event_mutex);
}

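/*
 * Enable (1) or disable (0) a single event. Normally called with
 * event_mutex held; also starts or stops cmdline recording to match
 * the TRACE_ITER_RECORD_CMD option.
 */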
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        int enable)
{
        int ret = 0;

        switch (enable) {
        case 0:
                if (call->flags & TRACE_EVENT_FL_ENABLED) {
                        call->flags &= ~TRACE_EVENT_FL_ENABLED;
                        if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
                                call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
                        }
                        call->class->reg(call, TRACE_REG_UNREGISTER);
                }
                break;
        case 1:
                if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
                        if (trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
                                call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
                        }
                        ret = call->class->reg(call, TRACE_REG_REGISTER);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event "
                                        "%s\n", call->name);
                                break;
                        }
                        call->flags |= TRACE_EVENT_FL_ENABLED;
                }
                break;
        }

        return ret;
}

static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                ftrace_event_enable_disable(call, 0);
        }
        mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 * Otherwise @match is compared against both the event name and the
 * system name, @sub against the system name only, and @event against
 * the event name only; a NULL parameter matches anything.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
                                  const char *event, int set)
{
        struct ftrace_event_call *call;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {

                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}

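/*
 * Parse one token written to the set_event file. For example (paths
 * assume debugfs is mounted at /sys/kernel/debug):
 *
 *      echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *      echo 'irq:*' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' (stripped by the caller) disables instead of enables.
 */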
static int ftrace_set_clr_event(char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        return __ftrace_set_clr_event(NULL, system, event, set);
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded((&parser))) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;

                ret = ftrace_set_clr_event(parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}

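/*
 * seq_file iterators: t_next/t_start walk every event for the
 * available_events file, skipping entries without a reg() function
 * (those exist only to expose ring buffer formats); s_next/s_start
 * walk only the enabled events for reading set_event.
 */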
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = v;

        (*pos)++;

        list_for_each_entry_continue(call, &ftrace_events, list) {
                /*
                 * The ftrace subsystem is for showing formats only.
                 * Its events cannot be enabled or disabled via the
                 * event files.
                 */
                if (call->class && call->class->reg)
                        return call;
        }

        return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_call *call;
        loff_t l;

        mutex_lock(&event_mutex);

        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
        for (l = 0; l <= *pos; ) {
                call = t_next(m, call, &l);
                if (!call)
                        break;
        }
        return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = v;

        (*pos)++;

        list_for_each_entry_continue(call, &ftrace_events, list) {
                if (call->flags & TRACE_EVENT_FL_ENABLED)
                        return call;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_call *call;
        loff_t l;

        mutex_lock(&event_mutex);

        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
        for (l = 0; l <= *pos; ) {
                call = s_next(m, call, &l);
                if (!call)
                        break;
        }
        return call;
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

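/*
 * Opening set_event for writing with O_TRUNC (a plain shell
 * redirection, e.g. "echo > set_event") first disables all events.
 */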
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_clear_events();

        seq_ops = inode->i_private;
        return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->flags & TRACE_EVENT_FL_ENABLED)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ret = ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        const char *system = filp->private_data;
        struct ftrace_event_call *call;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!call->name || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set,
                 * or if all the events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        const char *system = filp->private_data;
        unsigned long val;
        char buf[64];
        ssize_t ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        ret = __ftrace_set_clr_event(NULL, system, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

enum {
        FORMAT_HEADER           = 1,
        FORMAT_FIELD_SEPERATOR  = 2,
        FORMAT_PRINTFMT         = 3,
};

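/*
 * The "format" file is generated by walking a small state machine:
 * FORMAT_HEADER prints the name/ID banner, then the common fields
 * are listed, FORMAT_FIELD_SEPERATOR emits the blank line between
 * common and event-specific fields, and FORMAT_PRINTFMT finishes
 * with the print fmt line. Fields are stored newest-first, so the
 * iterators below walk the lists backwards (via ->prev) to print
 * them in definition order.
 */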
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                if (unlikely(list_empty(common_head)))
                        return NULL;

                field = list_entry(common_head->prev,
                                   struct ftrace_event_field, link);
                return field;

        case FORMAT_FIELD_SEPERATOR:
                if (unlikely(list_empty(head)))
                        return NULL;

                field = list_entry(head->prev, struct ftrace_event_field, link);
                return field;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        field = v;
        if (field->link.prev == common_head)
                return (void *)FORMAT_FIELD_SEPERATOR;
        else if (field->link.prev == head)
                return (void *)FORMAT_PRINTFMT;

        field = list_entry(field->link.prev, struct ftrace_event_field, link);

        return field;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
        loff_t l = 0;
        void *p;

        /* Start by showing the header */
        if (!*pos)
                return (void *)FORMAT_HEADER;

        p = (void *)FORMAT_HEADER;
        do {
                p = f_next(m, p, &l);
        } while (p && l < *pos);

        return p;
}

static int f_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", call->name);
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_printf(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPERATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = v;

        /*
         * Smartly shows the array type (except for dynamic arrays).
         * Normal:
         *      field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown as:
         *      field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (!strncmp(field->type, "__data_loc", 10))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}

static void f_stop(struct seq_file *m, void *p)
{
}

static const struct seq_operations trace_format_seq_ops = {
        .start          = f_start,
        .next           = f_next,
        .stop           = f_stop,
        .show           = f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
        struct ftrace_event_call *call = inode->i_private;
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = call;

        return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->event.type);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_event_filter(call, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

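/*
 * Install a new filter for this event. The written string is a
 * boolean expression over the event's fields, for example:
 *
 *      echo 'common_pid == 0 || bytes_req > 256' > events/.../filter
 *
 * (field names here are illustrative). Writing "0" removes the
 * current filter.
 */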
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_event_filter(call, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(system, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = trace_format_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = tracing_open_generic,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = tracing_open_generic,
        .read = system_enable_read,
        .write = system_enable_write,
        .llseek = default_llseek,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
        .llseek = default_llseek,
};

static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs "
                           "'events' directory\n");

        return d_events;
}

static LIST_HEAD(event_subsystems);

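/*
 * Find or create the directory for an event subsystem. Directories
 * are shared: nr_events counts how many events live in the subsystem
 * so the dir can be removed when the last one goes away. On failure
 * the top-level events directory is returned, so the event is still
 * created, just not inside a subsystem.
 */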
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
                        system->nr_events++;
                        return system->entry;
                }
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->nr_events = 1;
        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name) {
                debugfs_remove(system->entry);
                kfree(system);
                return d_events;
        }

        list_add(&system->list, &event_subsystems);

        system->filter = NULL;

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter) {
                pr_warning("Could not allocate filter for subsystem "
                           "'%s'\n", name);
                return system->entry;
        }

        entry = debugfs_create_file("filter", 0644, system->entry, system,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs "
                           "'%s/filter' entry\n", name);
        }

        trace_create_file("enable", 0644, system->entry,
                          (void *)system->name,
                          &ftrace_system_enable_fops);

        return system->entry;
}

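/*
 * Create the per-event directory with its "enable", "id" (when perf
 * is enabled), "filter" and "format" files. The file_operations are
 * passed in by the caller because module events need their own
 * copies (see trace_create_file_ops() below).
 */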
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        struct list_head *head;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->class->system, d_events);

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
                           "'%s' directory\n", call->name);
                return -1;
        }

        if (call->class->reg)
                trace_create_file("enable", 0644, call->dir, call,
                                  enable);

#ifdef CONFIG_PERF_EVENTS
        if (call->event.type && call->class->reg)
                trace_create_file("id", 0444, call->dir, call,
                                  id);
#endif

        /*
         * Other events may have the same class. Only update
         * the fields if they are not already defined.
         */
        head = trace_get_fields(call);
        if (list_empty(head)) {
                ret = call->class->define_fields(call);
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
        }
        trace_create_file("filter", 0644, call->dir, call,
                          filter);

        trace_create_file("format", 0444, call->dir, call,
                          format);

        return 0;
}

static int
__trace_add_event_call(struct ftrace_event_call *call, struct module *mod,
                       const struct file_operations *id,
                       const struct file_operations *enable,
                       const struct file_operations *filter,
                       const struct file_operations *format)
{
        struct dentry *d_events;
        int ret;

        /* The linker may leave blanks */
        if (!call->name)
                return -EINVAL;

        if (call->class->raw_init) {
                ret = call->class->raw_init(call);
                if (ret < 0) {
                        if (ret != -ENOSYS)
                                pr_warning("Could not initialize trace events/%s\n",
                                           call->name);
                        return ret;
                }
        }

        d_events = event_trace_events_dir();
        if (!d_events)
                return -ENOENT;

        ret = event_create_dir(call, d_events, id, enable, filter, format);
        if (!ret)
                list_add(&call->list, &ftrace_events);
        call->mod = mod;

        return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
        int ret;
        mutex_lock(&event_mutex);
        ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops,
                                     &ftrace_enable_fops,
                                     &ftrace_event_filter_fops,
                                     &ftrace_event_format_fops);
        mutex_unlock(&event_mutex);
        return ret;
}

static void remove_subsystem_dir(const char *name)
{
        struct event_subsystem *system;

        if (strcmp(name, TRACE_SYSTEM) == 0)
                return;

        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
                        if (!--system->nr_events) {
                                struct event_filter *filter = system->filter;

                                debugfs_remove_recursive(system->entry);
                                list_del(&system->list);
                                if (filter) {
                                        kfree(filter->filter_string);
                                        kfree(filter);
                                }
                                kfree(system->name);
                                kfree(system);
                        }
                        break;
                }
        }
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
        ftrace_event_enable_disable(call, 0);
        if (call->event.funcs)
                __unregister_ftrace_event(&call->event);
        debugfs_remove_recursive(call->dir);
        list_del(&call->list);
        trace_destroy_fields(call);
        destroy_preds(call);
        remove_subsystem_dir(call->class->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
        mutex_lock(&event_mutex);
        down_write(&trace_event_mutex);
        __trace_remove_event_call(call);
        up_write(&trace_event_mutex);
        mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)                       \
        for (event = start;                                     \
             (unsigned long)event < (unsigned long)end;         \
             event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head                list;
        struct module                   *mod;
        struct file_operations          id;
        struct file_operations          enable;
        struct file_operations          format;
        struct file_operations          filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call **call, **start, **end;

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        if (start == end)
                return;

        file_ops = trace_create_file_ops(mod);
        if (!file_ops)
                return;

        for_each_event(call, start, end) {
                __trace_add_event_call(*call, mod,
                                       &file_ops->id, &file_ops->enable,
                                       &file_ops->filter, &file_ops->format);
        }
}

static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool found = false;

        down_write(&trace_event_mutex);
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        found = true;
                        __trace_remove_event_call(call);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events.
         */
        if (found)
                tracing_reset_current_online_cpus();
        up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);

        return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

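/*
 * Events can also be enabled on the kernel command line, e.g.
 *
 *      trace_event=sched:sched_switch,irq:*
 *
 * The comma-separated list is stashed here at early boot and applied
 * once the events have been registered in event_trace_init().
 */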
static __init int setup_trace_event(char *str)
{
        strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
        ring_buffer_expanded = 1;
        tracing_selftest_disabled = 1;

        return 1;
}
__setup("trace_event=", setup_trace_event);

static __init int event_trace_init(void)
{
        struct ftrace_event_call **call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
        int ret;
        char *buf = bootup_event_buf;
        char *token;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    (void *)&show_event_seq_ops,
                                    &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    (void *)&show_set_event_seq_ops,
                                    &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        trace_create_file("enable", 0644, d_events,
                          NULL, &ftrace_system_enable_fops);

        if (trace_define_common_fields())
                pr_warning("tracing: Failed to allocate common fields");

        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                __trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
                                       &ftrace_enable_fops,
                                       &ftrace_event_filter_fops,
                                       &ftrace_event_format_fops);
        }

        while (true) {
                token = strsep(&buf, ",");

                if (!token)
                        break;
                if (!*token)
                        continue;

                ret = ftrace_set_clr_event(token, 1);
                if (ret)
                        pr_warning("Failed to enable trace event: %s\n", token);
        }

        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        int ret;

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(call, &ftrace_events, list) {

                /* Only test those that have a probe */
                if (!call->class || !call->class->probe)
                        continue;

/*
 * Testing syscall events here is pretty useless, but we still do it
 * if configured. It is time consuming, though; what we really need
 * is a user thread to perform the syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
                if (call->class->system &&
                    strcmp(call->class->system, "syscalls") == 0)
                        continue;
#endif

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (call->flags & TRACE_EVENT_FL_ENABLED) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                ftrace_event_enable_disable(call, 1);
                event_test_stuff();
                ftrace_event_enable_disable(call, 0);

                pr_cont("OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(system, &event_subsystems, list) {

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret))
                        pr_warning("error disabling system %s\n",
                                   system->name);

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        /* reset sysname */
        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

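/*
 * Function tracer callback used while re-running the self tests.
 * The per-cpu ftrace_test_event_disable counter keeps the callback
 * from recursing into itself if writing to the ring buffer triggers
 * another traced function call.
 */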
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        pc = preempt_count();
        preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(&buffer,
                                                  TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry   = ring_buffer_event_data(event);
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;

        trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __initdata =
{
        .func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
        register_ftrace_function(&trace_ops);
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
        if (!tracing_selftest_disabled) {
                event_trace_self_tests();
                event_trace_self_test_with_function();
        }

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif