ftrace, ia64: Add macro for ftrace_caller
kernel/trace/ftrace.c (linux-2.6.git)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/list.h>
29
30 #include <asm/ftrace.h>
31
32 #include "trace.h"
33
34 #define FTRACE_WARN_ON(cond)                    \
35         do {                                    \
36                 if (WARN_ON(cond))              \
37                         ftrace_kill();          \
38         } while (0)
39
40 #define FTRACE_WARN_ON_ONCE(cond)               \
41         do {                                    \
42                 if (WARN_ON_ONCE(cond))         \
43                         ftrace_kill();          \
44         } while (0)
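/*
 * Usage sketch (the condition here is illustrative only): WARN_ON()
 * evaluates to its condition, so a single test both emits the warning
 * and shuts ftrace down:
 *
 *	FTRACE_WARN_ON(!ftrace_pages);
 *
 * If the condition is true, the warning fires once and ftrace_kill()
 * disables all tracing rather than risk further damage.
 */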
45
46 /* ftrace_enabled is a flag that turns function tracing on or off */
47 int ftrace_enabled __read_mostly;
48 static int last_ftrace_enabled;
49
50 /* set when tracing only a pid */
51 struct pid *ftrace_pid_trace;
52 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
53
54 /* Quick disabling of function tracer. */
55 int function_trace_stop;
56
57 /*
58  * ftrace_disabled is set when an anomaly is discovered.
59  * ftrace_disabled is much stronger than ftrace_enabled.
60  */
61 static int ftrace_disabled __read_mostly;
62
63 static DEFINE_SPINLOCK(ftrace_lock);
64 static DEFINE_MUTEX(ftrace_sysctl_lock);
65 static DEFINE_MUTEX(ftrace_start_lock);
66
67 static struct ftrace_ops ftrace_list_end __read_mostly =
68 {
69         .func = ftrace_stub,
70 };
71
72 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
73 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
74 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
75 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
76
77 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
78 {
79         struct ftrace_ops *op = ftrace_list;
80
81         /* in case someone actually ports this to alpha! */
82         read_barrier_depends();
83
84         while (op != &ftrace_list_end) {
85                 /* silly alpha */
86                 read_barrier_depends();
87                 op->func(ip, parent_ip);
88                 op = op->next;
89         }
90 }
91
92 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
93 {
94         if (!test_tsk_trace_trace(current))
95                 return;
96
97         ftrace_pid_function(ip, parent_ip);
98 }
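/*
 * Call-path sketch when pid filtering is active (illustrative):
 *
 *	mcount
 *	  -> ftrace_trace_function == ftrace_pid_func
 *	       -> test_tsk_trace_trace(current) fails: event is dropped
 *	       -> otherwise: ftrace_pid_function(ip, parent_ip)
 */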
99
100 static void set_ftrace_pid_function(ftrace_func_t func)
101 {
102         /* do not set ftrace_pid_function to itself! */
103         if (func != ftrace_pid_func)
104                 ftrace_pid_function = func;
105 }
106
107 /**
108  * clear_ftrace_function - reset the ftrace function
109  *
110  * This NULLs the ftrace function and in essence stops
111  * tracing.  There may be some lag before all CPUs see the change.
112  */
113 void clear_ftrace_function(void)
114 {
115         ftrace_trace_function = ftrace_stub;
116         __ftrace_trace_function = ftrace_stub;
117         ftrace_pid_function = ftrace_stub;
118 }
119
120 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121 /*
122  * For those archs that do not test function_trace_stop in their
123  * mcount call site, we need to do it from C.
124  */
125 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
126 {
127         if (function_trace_stop)
128                 return;
129
130         __ftrace_trace_function(ip, parent_ip);
131 }
132 #endif
133
134 static int __register_ftrace_function(struct ftrace_ops *ops)
135 {
136         /* should not be called from interrupt context */
137         spin_lock(&ftrace_lock);
138
139         ops->next = ftrace_list;
140         /*
141          * We are inserting ops into the ftrace_list, but another
142          * CPU might be walking that list concurrently. We need to
143          * make sure the ops->next pointer is valid before another
144          * CPU can see ops itself on the list.
145          */
146         smp_wmb();
147         ftrace_list = ops;
148
149         if (ftrace_enabled) {
150                 ftrace_func_t func;
151
152                 if (ops->next == &ftrace_list_end)
153                         func = ops->func;
154                 else
155                         func = ftrace_list_func;
156
157                 if (ftrace_pid_trace) {
158                         set_ftrace_pid_function(func);
159                         func = ftrace_pid_func;
160                 }
161
162                 /*
163                  * For one func, simply call it directly.
164                  * For more than one func, call the chain.
165                  */
166 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
167                 ftrace_trace_function = func;
168 #else
169                 __ftrace_trace_function = func;
170                 ftrace_trace_function = ftrace_test_stop_func;
171 #endif
172         }
173
174         spin_unlock(&ftrace_lock);
175
176         return 0;
177 }
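/*
 * Minimal caller sketch (names are illustrative, not part of this
 * file): an ftrace_ops bundles the callback that will run for every
 * traced function once registered:
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *	{
 *		... must not itself be traced, or it will recurse ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */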
178
179 static int __unregister_ftrace_function(struct ftrace_ops *ops)
180 {
181         struct ftrace_ops **p;
182         int ret = 0;
183
184         /* should not be called from interrupt context */
185         spin_lock(&ftrace_lock);
186
187         /*
188          * If we are removing the last function, then simply point
189          * to the ftrace_stub.
190          */
191         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
192                 ftrace_trace_function = ftrace_stub;
193                 ftrace_list = &ftrace_list_end;
194                 goto out;
195         }
196
197         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
198                 if (*p == ops)
199                         break;
200
201         if (*p != ops) {
202                 ret = -1;
203                 goto out;
204         }
205
206         *p = (*p)->next;
207
208         if (ftrace_enabled) {
209                 /* If we only have one func left, then call that directly */
210                 if (ftrace_list->next == &ftrace_list_end) {
211                         ftrace_func_t func = ftrace_list->func;
212
213                         if (ftrace_pid_trace) {
214                                 set_ftrace_pid_function(func);
215                                 func = ftrace_pid_func;
216                         }
217 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
218                         ftrace_trace_function = func;
219 #else
220                         __ftrace_trace_function = func;
221 #endif
222                 }
223         }
224
225  out:
226         spin_unlock(&ftrace_lock);
227
228         return ret;
229 }
230
231 static void ftrace_update_pid_func(void)
232 {
233         ftrace_func_t func;
234
235         /* should not be called from interrupt context */
236         spin_lock(&ftrace_lock);
237
238         if (ftrace_trace_function == ftrace_stub)
239                 goto out;
240
241         func = ftrace_trace_function;
242
243         if (ftrace_pid_trace) {
244                 set_ftrace_pid_function(func);
245                 func = ftrace_pid_func;
246         } else {
247                 if (func == ftrace_pid_func)
248                         func = ftrace_pid_function;
249         }
250
251 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
252         ftrace_trace_function = func;
253 #else
254         __ftrace_trace_function = func;
255 #endif
256
257  out:
258         spin_unlock(&ftrace_lock);
259 }
260
261 #ifdef CONFIG_DYNAMIC_FTRACE
262 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
263 # error Dynamic ftrace depends on MCOUNT_RECORD
264 #endif
265
266 enum {
267         FTRACE_ENABLE_CALLS             = (1 << 0),
268         FTRACE_DISABLE_CALLS            = (1 << 1),
269         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
270         FTRACE_ENABLE_MCOUNT            = (1 << 3),
271         FTRACE_DISABLE_MCOUNT           = (1 << 4),
272         FTRACE_START_FUNC_RET           = (1 << 5),
273         FTRACE_STOP_FUNC_RET            = (1 << 6),
274 };
275
276 static int ftrace_filtered;
277
278 static LIST_HEAD(ftrace_new_addrs);
279
280 static DEFINE_MUTEX(ftrace_regex_lock);
281
282 struct ftrace_page {
283         struct ftrace_page      *next;
284         int                     index;
285         struct dyn_ftrace       records[];
286 };
287
288 #define ENTRIES_PER_PAGE \
289   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
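/*
 * Rough arithmetic, assuming a 4K PAGE_SIZE and a dyn_ftrace of a few
 * dozen bytes (both vary by architecture and config): one page holds
 * on the order of one to two hundred records, so NR_TO_INIT below
 * translates to several dozen pages.
 */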
290
291 /* estimate from running different kernels */
292 #define NR_TO_INIT              10000
293
294 static struct ftrace_page       *ftrace_pages_start;
295 static struct ftrace_page       *ftrace_pages;
296
297 static struct dyn_ftrace *ftrace_free_records;
298
299
300 #ifdef CONFIG_KPROBES
301
302 static int frozen_record_count;
303
304 static inline void freeze_record(struct dyn_ftrace *rec)
305 {
306         if (!(rec->flags & FTRACE_FL_FROZEN)) {
307                 rec->flags |= FTRACE_FL_FROZEN;
308                 frozen_record_count++;
309         }
310 }
311
312 static inline void unfreeze_record(struct dyn_ftrace *rec)
313 {
314         if (rec->flags & FTRACE_FL_FROZEN) {
315                 rec->flags &= ~FTRACE_FL_FROZEN;
316                 frozen_record_count--;
317         }
318 }
319
320 static inline int record_frozen(struct dyn_ftrace *rec)
321 {
322         return rec->flags & FTRACE_FL_FROZEN;
323 }
324 #else
325 # define freeze_record(rec)                     ({ 0; })
326 # define unfreeze_record(rec)                   ({ 0; })
327 # define record_frozen(rec)                     ({ 0; })
328 #endif /* CONFIG_KPROBES */
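/*
 * Why records are frozen (sketch): a kprobe may have replaced the
 * instruction at an mcount site with a breakpoint. Patching those
 * bytes underneath the probe would corrupt it, so
 * ftrace_replace_code() freezes such records and leaves their text
 * alone until the kprobe is gone.
 */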
329
330 static void ftrace_free_rec(struct dyn_ftrace *rec)
331 {
332         rec->ip = (unsigned long)ftrace_free_records;
333         ftrace_free_records = rec;
334         rec->flags |= FTRACE_FL_FREE;
335 }
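/*
 * Free-list encoding note: freed records are chained through their ip
 * field, i.e. rec->ip temporarily holds the address of the next free
 * record instead of a call site. FTRACE_FL_FREE marks such records so
 * ftrace_alloc_dyn_node() can sanity-check the list before reuse.
 */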
336
337 void ftrace_release(void *start, unsigned long size)
338 {
339         struct dyn_ftrace *rec;
340         struct ftrace_page *pg;
341         unsigned long s = (unsigned long)start;
342         unsigned long e = s + size;
343         int i;
344
345         if (ftrace_disabled || !start)
346                 return;
347
348         /* should not be called from interrupt context */
349         spin_lock(&ftrace_lock);
350
351         for (pg = ftrace_pages_start; pg; pg = pg->next) {
352                 for (i = 0; i < pg->index; i++) {
353                         rec = &pg->records[i];
354
355                         if ((rec->ip >= s) && (rec->ip < e))
356                                 ftrace_free_rec(rec);
357                 }
358         }
359         spin_unlock(&ftrace_lock);
360 }
361
362 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
363 {
364         struct dyn_ftrace *rec;
365
366         /* First check for freed records */
367         if (ftrace_free_records) {
368                 rec = ftrace_free_records;
369
370                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
371                         FTRACE_WARN_ON_ONCE(1);
372                         ftrace_free_records = NULL;
373                         return NULL;
374                 }
375
376                 ftrace_free_records = (void *)rec->ip;
377                 memset(rec, 0, sizeof(*rec));
378                 return rec;
379         }
380
381         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
382                 if (!ftrace_pages->next) {
383                         /* allocate another page */
384                         ftrace_pages->next =
385                                 (void *)get_zeroed_page(GFP_KERNEL);
386                         if (!ftrace_pages->next)
387                                 return NULL;
388                 }
389                 ftrace_pages = ftrace_pages->next;
390         }
391
392         return &ftrace_pages->records[ftrace_pages->index++];
393 }
394
395 static struct dyn_ftrace *
396 ftrace_record_ip(unsigned long ip)
397 {
398         struct dyn_ftrace *rec;
399
400         if (ftrace_disabled)
401                 return NULL;
402
403         rec = ftrace_alloc_dyn_node(ip);
404         if (!rec)
405                 return NULL;
406
407         rec->ip = ip;
408
409         list_add(&rec->list, &ftrace_new_addrs);
410
411         return rec;
412 }
413
414 static void print_ip_ins(const char *fmt, unsigned char *p)
415 {
416         int i;
417
418         printk(KERN_CONT "%s", fmt);
419
420         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
421                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
422 }
423
424 static void ftrace_bug(int failed, unsigned long ip)
425 {
426         switch (failed) {
427         case -EFAULT:
428                 FTRACE_WARN_ON_ONCE(1);
429                 pr_info("ftrace faulted on modifying ");
430                 print_ip_sym(ip);
431                 break;
432         case -EINVAL:
433                 FTRACE_WARN_ON_ONCE(1);
434                 pr_info("ftrace failed to modify ");
435                 print_ip_sym(ip);
436                 print_ip_ins(" actual: ", (unsigned char *)ip);
437                 printk(KERN_CONT "\n");
438                 break;
439         case -EPERM:
440                 FTRACE_WARN_ON_ONCE(1);
441                 pr_info("ftrace faulted on writing ");
442                 print_ip_sym(ip);
443                 break;
444         default:
445                 FTRACE_WARN_ON_ONCE(1);
446                 pr_info("ftrace faulted on unknown error ");
447                 print_ip_sym(ip);
448         }
449 }
450
451
452 static int
453 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
454 {
455         unsigned long ip, fl;
456         unsigned long ftrace_addr;
457
458         ftrace_addr = (unsigned long)FTRACE_ADDR;
459
460         ip = rec->ip;
461
462         /*
463          * If this record is not to be traced and
464          * it is not enabled, then do nothing.
465          *
466          * If this record is not to be traced but
467          * it is currently enabled, then disable it.
468          *
469          */
470         if (rec->flags & FTRACE_FL_NOTRACE) {
471                 if (rec->flags & FTRACE_FL_ENABLED)
472                         rec->flags &= ~FTRACE_FL_ENABLED;
473                 else
474                         return 0;
475
476         } else if (ftrace_filtered && enable) {
477                 /*
478                  * Filtering is on:
479                  */
480
481                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
482
483                 /* Record is filtered and enabled, do nothing */
484                 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
485                         return 0;
486
487                 /* Record is not filtered and is not enabled, do nothing */
488                 if (!fl)
489                         return 0;
490
491                 /* Record is not filtered but enabled, disable it */
492                 if (fl == FTRACE_FL_ENABLED)
493                         rec->flags &= ~FTRACE_FL_ENABLED;
494                 else
495                 /* Otherwise record is filtered but not enabled, enable it */
496                         rec->flags |= FTRACE_FL_ENABLED;
497         } else {
498                 /* Either filtering is off, or we are disabling */
499
500                 if (enable) {
501                         /* if record is enabled, do nothing */
502                         if (rec->flags & FTRACE_FL_ENABLED)
503                                 return 0;
504
505                         rec->flags |= FTRACE_FL_ENABLED;
506
507                 } else {
508
509                         /* if record is not enabled do nothing */
510                         if (!(rec->flags & FTRACE_FL_ENABLED))
511                                 return 0;
512
513                         rec->flags &= ~FTRACE_FL_ENABLED;
514                 }
515         }
516
517         if (rec->flags & FTRACE_FL_ENABLED)
518                 return ftrace_make_call(rec, ftrace_addr);
519         else
520                 return ftrace_make_nop(NULL, rec, ftrace_addr);
521 }
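/*
 * Truth table for the filtered case above (fl is the FILTER|ENABLED
 * subset of rec->flags):
 *
 *	FILTER|ENABLED -> already tracing, nothing to do
 *	0              -> not wanted and not on, nothing to do
 *	ENABLED        -> no longer filtered, turn the site off
 *	FILTER         -> wanted but off, turn the site on
 */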
522
523 static void ftrace_replace_code(int enable)
524 {
525         int i, failed;
526         struct dyn_ftrace *rec;
527         struct ftrace_page *pg;
528
529         for (pg = ftrace_pages_start; pg; pg = pg->next) {
530                 for (i = 0; i < pg->index; i++) {
531                         rec = &pg->records[i];
532
533                         /*
534                          * Skip over free records and records that have
535                          * failed.
536                          */
537                         if (rec->flags & FTRACE_FL_FREE ||
538                             rec->flags & FTRACE_FL_FAILED)
539                                 continue;
540
541                         /* ignore updates to this record's mcount site */
542                         if (get_kprobe((void *)rec->ip)) {
543                                 freeze_record(rec);
544                                 continue;
545                         } else {
546                                 unfreeze_record(rec);
547                         }
548
549                         failed = __ftrace_replace_code(rec, enable);
550                         if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
551                                 rec->flags |= FTRACE_FL_FAILED;
552                                 if ((system_state == SYSTEM_BOOTING) ||
553                                     !core_kernel_text(rec->ip)) {
554                                         ftrace_free_rec(rec);
555                                 } else
556                                         ftrace_bug(failed, rec->ip);
557                         }
558                 }
559         }
560 }
561
562 static int
563 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
564 {
565         unsigned long ip;
566         int ret;
567
568         ip = rec->ip;
569
570         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
571         if (ret) {
572                 ftrace_bug(ret, ip);
573                 rec->flags |= FTRACE_FL_FAILED;
574                 return 0;
575         }
576         return 1;
577 }
578
579 static int __ftrace_modify_code(void *data)
580 {
581         int *command = data;
582
583         if (*command & FTRACE_ENABLE_CALLS)
584                 ftrace_replace_code(1);
585         else if (*command & FTRACE_DISABLE_CALLS)
586                 ftrace_replace_code(0);
587
588         if (*command & FTRACE_UPDATE_TRACE_FUNC)
589                 ftrace_update_ftrace_func(ftrace_trace_function);
590
591         if (*command & FTRACE_START_FUNC_RET)
592                 ftrace_enable_ftrace_graph_caller();
593         else if (*command & FTRACE_STOP_FUNC_RET)
594                 ftrace_disable_ftrace_graph_caller();
595
596         return 0;
597 }
598
599 static void ftrace_run_update_code(int command)
600 {
601         stop_machine(__ftrace_modify_code, &command, NULL);
602 }
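/*
 * stop_machine() runs __ftrace_modify_code() while every other CPU
 * spins with interrupts disabled, so no CPU can be executing an
 * mcount call site while its instruction bytes are being rewritten.
 */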
603
604 static ftrace_func_t saved_ftrace_func;
605 static int ftrace_start_up;
606
607 static void ftrace_startup_enable(int command)
608 {
609         if (saved_ftrace_func != ftrace_trace_function) {
610                 saved_ftrace_func = ftrace_trace_function;
611                 command |= FTRACE_UPDATE_TRACE_FUNC;
612         }
613
614         if (!command || !ftrace_enabled)
615                 return;
616
617         ftrace_run_update_code(command);
618 }
619
620 static void ftrace_startup(int command)
621 {
622         if (unlikely(ftrace_disabled))
623                 return;
624
625         mutex_lock(&ftrace_start_lock);
626         ftrace_start_up++;
627         command |= FTRACE_ENABLE_CALLS;
628
629         ftrace_startup_enable(command);
630
631         mutex_unlock(&ftrace_start_lock);
632 }
633
634 static void ftrace_shutdown(int command)
635 {
636         if (unlikely(ftrace_disabled))
637                 return;
638
639         mutex_lock(&ftrace_start_lock);
640         ftrace_start_up--;
641         if (!ftrace_start_up)
642                 command |= FTRACE_DISABLE_CALLS;
643
644         if (saved_ftrace_func != ftrace_trace_function) {
645                 saved_ftrace_func = ftrace_trace_function;
646                 command |= FTRACE_UPDATE_TRACE_FUNC;
647         }
648
649         if (!command || !ftrace_enabled)
650                 goto out;
651
652         ftrace_run_update_code(command);
653  out:
654         mutex_unlock(&ftrace_start_lock);
655 }
656
657 static void ftrace_startup_sysctl(void)
658 {
659         int command = FTRACE_ENABLE_MCOUNT;
660
661         if (unlikely(ftrace_disabled))
662                 return;
663
664         mutex_lock(&ftrace_start_lock);
665         /* Force update next time */
666         saved_ftrace_func = NULL;
667         /* ftrace_start_up is true if we want ftrace running */
668         if (ftrace_start_up)
669                 command |= FTRACE_ENABLE_CALLS;
670
671         ftrace_run_update_code(command);
672         mutex_unlock(&ftrace_start_lock);
673 }
674
675 static void ftrace_shutdown_sysctl(void)
676 {
677         int command = FTRACE_DISABLE_MCOUNT;
678
679         if (unlikely(ftrace_disabled))
680                 return;
681
682         mutex_lock(&ftrace_start_lock);
683         /* ftrace_start_up is true if ftrace is running */
684         if (ftrace_start_up)
685                 command |= FTRACE_DISABLE_CALLS;
686
687         ftrace_run_update_code(command);
688         mutex_unlock(&ftrace_start_lock);
689 }
690
691 static cycle_t          ftrace_update_time;
692 static unsigned long    ftrace_update_cnt;
693 unsigned long           ftrace_update_tot_cnt;
694
695 static int ftrace_update_code(struct module *mod)
696 {
697         struct dyn_ftrace *p, *t;
698         cycle_t start, stop;
699
700         start = ftrace_now(raw_smp_processor_id());
701         ftrace_update_cnt = 0;
702
703         list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
704
705                 /* If something went wrong, bail without enabling anything */
706                 if (unlikely(ftrace_disabled))
707                         return -1;
708
709                 list_del_init(&p->list);
710
711                 /* convert record (i.e., patch the mcount call with a NOP) */
712                 if (ftrace_code_disable(mod, p)) {
713                         p->flags |= FTRACE_FL_CONVERTED;
714                         ftrace_update_cnt++;
715                 } else
716                         ftrace_free_rec(p);
717         }
718
719         stop = ftrace_now(raw_smp_processor_id());
720         ftrace_update_time = stop - start;
721         ftrace_update_tot_cnt += ftrace_update_cnt;
722
723         return 0;
724 }
725
726 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
727 {
728         struct ftrace_page *pg;
729         int cnt;
730         int i;
731
732         /* allocate a few pages */
733         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
734         if (!ftrace_pages_start)
735                 return -1;
736
737         /*
738          * Allocate a few more pages.
739          *
740          * TODO: have some parser search vmlinux before
741          *   final linking to find all calls to ftrace.
742          *   Then we can:
743          *    a) know how many pages to allocate.
744          *     and/or
745          *    b) set up the table then.
746          *
747          *  The dynamic code is still necessary for
748          *  modules.
749          */
750
751         pg = ftrace_pages = ftrace_pages_start;
752
753         cnt = num_to_init / ENTRIES_PER_PAGE;
754         pr_info("ftrace: allocating %ld entries in %d pages\n",
755                 num_to_init, cnt + 1);
756
757         for (i = 0; i < cnt; i++) {
758                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
759
760                 /* If we fail, we'll try later anyway */
761                 if (!pg->next)
762                         break;
763
764                 pg = pg->next;
765         }
766
767         return 0;
768 }
769
770 enum {
771         FTRACE_ITER_FILTER      = (1 << 0),
772         FTRACE_ITER_CONT        = (1 << 1),
773         FTRACE_ITER_NOTRACE     = (1 << 2),
774         FTRACE_ITER_FAILURES    = (1 << 3),
775 };
776
777 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
778
779 struct ftrace_iterator {
780         struct ftrace_page      *pg;
781         int                     idx;
782         unsigned                flags;
783         unsigned char           buffer[FTRACE_BUFF_MAX+1];
784         unsigned                buffer_idx;
785         unsigned                filtered;
786 };
787
788 static void *
789 t_next(struct seq_file *m, void *v, loff_t *pos)
790 {
791         struct ftrace_iterator *iter = m->private;
792         struct dyn_ftrace *rec = NULL;
793
794         (*pos)++;
795
796         /* should not be called from interrupt context */
797         spin_lock(&ftrace_lock);
798  retry:
799         if (iter->idx >= iter->pg->index) {
800                 if (iter->pg->next) {
801                         iter->pg = iter->pg->next;
802                         iter->idx = 0;
803                         goto retry;
804                 } else {
805                         iter->idx = -1;
806                 }
807         } else {
808                 rec = &iter->pg->records[iter->idx++];
809                 if ((rec->flags & FTRACE_FL_FREE) ||
810
811                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
812                      (rec->flags & FTRACE_FL_FAILED)) ||
813
814                     ((iter->flags & FTRACE_ITER_FAILURES) &&
815                      !(rec->flags & FTRACE_FL_FAILED)) ||
816
817                     ((iter->flags & FTRACE_ITER_FILTER) &&
818                      !(rec->flags & FTRACE_FL_FILTER)) ||
819
820                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
821                      !(rec->flags & FTRACE_FL_NOTRACE))) {
822                         rec = NULL;
823                         goto retry;
824                 }
825         }
826         spin_unlock(&ftrace_lock);
827
828         return rec;
829 }
830
831 static void *t_start(struct seq_file *m, loff_t *pos)
832 {
833         struct ftrace_iterator *iter = m->private;
834         void *p = NULL;
835
836         if (*pos > 0) {
837                 if (iter->idx < 0)
838                         return p;
839                 (*pos)--;
840                 iter->idx--;
841         }
842
843         p = t_next(m, p, pos);
844
845         return p;
846 }
847
848 static void t_stop(struct seq_file *m, void *p)
849 {
850 }
851
852 static int t_show(struct seq_file *m, void *v)
853 {
854         struct dyn_ftrace *rec = v;
855         char str[KSYM_SYMBOL_LEN];
856
857         if (!rec)
858                 return 0;
859
860         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
861
862         seq_printf(m, "%s\n", str);
863
864         return 0;
865 }
866
867 static const struct seq_operations show_ftrace_seq_ops = {
868         .start = t_start,
869         .next = t_next,
870         .stop = t_stop,
871         .show = t_show,
872 };
873
874 static int
875 ftrace_avail_open(struct inode *inode, struct file *file)
876 {
877         struct ftrace_iterator *iter;
878         int ret;
879
880         if (unlikely(ftrace_disabled))
881                 return -ENODEV;
882
883         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
884         if (!iter)
885                 return -ENOMEM;
886
887         iter->pg = ftrace_pages_start;
888
889         ret = seq_open(file, &show_ftrace_seq_ops);
890         if (!ret) {
891                 struct seq_file *m = file->private_data;
892
893                 m->private = iter;
894         } else {
895                 kfree(iter);
896         }
897
898         return ret;
899 }
900
901 int ftrace_avail_release(struct inode *inode, struct file *file)
902 {
903         struct seq_file *m = (struct seq_file *)file->private_data;
904         struct ftrace_iterator *iter = m->private;
905
906         seq_release(inode, file);
907         kfree(iter);
908
909         return 0;
910 }
911
912 static int
913 ftrace_failures_open(struct inode *inode, struct file *file)
914 {
915         int ret;
916         struct seq_file *m;
917         struct ftrace_iterator *iter;
918
919         ret = ftrace_avail_open(inode, file);
920         if (!ret) {
921                 m = (struct seq_file *)file->private_data;
922                 iter = (struct ftrace_iterator *)m->private;
923                 iter->flags = FTRACE_ITER_FAILURES;
924         }
925
926         return ret;
927 }
928
929
930 static void ftrace_filter_reset(int enable)
931 {
932         struct ftrace_page *pg;
933         struct dyn_ftrace *rec;
934         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
935         unsigned i;
936
937         /* should not be called from interrupt context */
938         spin_lock(&ftrace_lock);
939         if (enable)
940                 ftrace_filtered = 0;
941         pg = ftrace_pages_start;
942         while (pg) {
943                 for (i = 0; i < pg->index; i++) {
944                         rec = &pg->records[i];
945                         if (rec->flags & FTRACE_FL_FAILED)
946                                 continue;
947                         rec->flags &= ~type;
948                 }
949                 pg = pg->next;
950         }
951         spin_unlock(&ftrace_lock);
952 }
953
954 static int
955 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
956 {
957         struct ftrace_iterator *iter;
958         int ret = 0;
959
960         if (unlikely(ftrace_disabled))
961                 return -ENODEV;
962
963         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
964         if (!iter)
965                 return -ENOMEM;
966
967         mutex_lock(&ftrace_regex_lock);
968         if ((file->f_mode & FMODE_WRITE) &&
969             !(file->f_flags & O_APPEND))
970                 ftrace_filter_reset(enable);
971
972         if (file->f_mode & FMODE_READ) {
973                 iter->pg = ftrace_pages_start;
974                 iter->flags = enable ? FTRACE_ITER_FILTER :
975                         FTRACE_ITER_NOTRACE;
976
977                 ret = seq_open(file, &show_ftrace_seq_ops);
978                 if (!ret) {
979                         struct seq_file *m = file->private_data;
980                         m->private = iter;
981                 } else
982                         kfree(iter);
983         } else
984                 file->private_data = iter;
985         mutex_unlock(&ftrace_regex_lock);
986
987         return ret;
988 }
989
990 static int
991 ftrace_filter_open(struct inode *inode, struct file *file)
992 {
993         return ftrace_regex_open(inode, file, 1);
994 }
995
996 static int
997 ftrace_notrace_open(struct inode *inode, struct file *file)
998 {
999         return ftrace_regex_open(inode, file, 0);
1000 }
1001
1002 static ssize_t
1003 ftrace_regex_read(struct file *file, char __user *ubuf,
1004                        size_t cnt, loff_t *ppos)
1005 {
1006         if (file->f_mode & FMODE_READ)
1007                 return seq_read(file, ubuf, cnt, ppos);
1008         else
1009                 return -EPERM;
1010 }
1011
1012 static loff_t
1013 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1014 {
1015         loff_t ret;
1016
1017         if (file->f_mode & FMODE_READ)
1018                 ret = seq_lseek(file, offset, origin);
1019         else
1020                 file->f_pos = ret = 1;
1021
1022         return ret;
1023 }
1024
1025 enum {
1026         MATCH_FULL,
1027         MATCH_FRONT_ONLY,
1028         MATCH_MIDDLE_ONLY,
1029         MATCH_END_ONLY,
1030 };
1031
1032 static void
1033 ftrace_match(unsigned char *buff, int len, int enable)
1034 {
1035         char str[KSYM_SYMBOL_LEN];
1036         char *search = NULL;
1037         struct ftrace_page *pg;
1038         struct dyn_ftrace *rec;
1039         int type = MATCH_FULL;
1040         unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1041         unsigned i, match = 0, search_len = 0;
1042         int not = 0;
1043
1044         if (buff[0] == '!') {
1045                 not = 1;
1046                 buff++;
1047                 len--;
1048         }
1049
1050         for (i = 0; i < len; i++) {
1051                 if (buff[i] == '*') {
1052                         if (!i) {
1053                                 search = buff + i + 1;
1054                                 type = MATCH_END_ONLY;
1055                                 search_len = len - (i + 1);
1056                         } else {
1057                                 if (type == MATCH_END_ONLY) {
1058                                         type = MATCH_MIDDLE_ONLY;
1059                                 } else {
1060                                         match = i;
1061                                         type = MATCH_FRONT_ONLY;
1062                                 }
1063                                 buff[i] = 0;
1064                                 break;
1065                         }
1066                 }
1067         }
1068
1069         /* should not be called from interrupt context */
1070         spin_lock(&ftrace_lock);
1071         if (enable)
1072                 ftrace_filtered = 1;
1073         pg = ftrace_pages_start;
1074         while (pg) {
1075                 for (i = 0; i < pg->index; i++) {
1076                         int matched = 0;
1077                         char *ptr;
1078
1079                         rec = &pg->records[i];
1080                         if (rec->flags & FTRACE_FL_FAILED)
1081                                 continue;
1082                         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1083                         switch (type) {
1084                         case MATCH_FULL:
1085                                 if (strcmp(str, buff) == 0)
1086                                         matched = 1;
1087                                 break;
1088                         case MATCH_FRONT_ONLY:
1089                                 if (memcmp(str, buff, match) == 0)
1090                                         matched = 1;
1091                                 break;
1092                         case MATCH_MIDDLE_ONLY:
1093                                 if (strstr(str, search))
1094                                         matched = 1;
1095                                 break;
1096                         case MATCH_END_ONLY:
1097                                 ptr = strstr(str, search);
1098                                 if (ptr && (ptr[search_len] == 0))
1099                                         matched = 1;
1100                                 break;
1101                         }
1102                         if (matched) {
1103                                 if (not)
1104                                         rec->flags &= ~flag;
1105                                 else
1106                                         rec->flags |= flag;
1107                         }
1108                 }
1109                 pg = pg->next;
1110         }
1111         spin_unlock(&ftrace_lock);
1112 }
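/*
 * Glob forms accepted by the parser above (buff is the user string):
 *
 *	"func"   -> MATCH_FULL         exact name
 *	"func*"  -> MATCH_FRONT_ONLY   prefix match
 *	"*func"  -> MATCH_END_ONLY     suffix match
 *	"*func*" -> MATCH_MIDDLE_ONLY  substring match
 *	"!..."   -> clear the flag on matches instead of setting it
 */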
1113
1114 static ssize_t
1115 ftrace_regex_write(struct file *file, const char __user *ubuf,
1116                    size_t cnt, loff_t *ppos, int enable)
1117 {
1118         struct ftrace_iterator *iter;
1119         char ch;
1120         size_t read = 0;
1121         ssize_t ret;
1122
1123         if (!cnt)  /* cnt is a size_t; it can never be negative */
1124                 return 0;
1125
1126         mutex_lock(&ftrace_regex_lock);
1127
1128         if (file->f_mode & FMODE_READ) {
1129                 struct seq_file *m = file->private_data;
1130                 iter = m->private;
1131         } else
1132                 iter = file->private_data;
1133
1134         if (!*ppos) {
1135                 iter->flags &= ~FTRACE_ITER_CONT;
1136                 iter->buffer_idx = 0;
1137         }
1138
1139         ret = get_user(ch, ubuf++);
1140         if (ret)
1141                 goto out;
1142         read++;
1143         cnt--;
1144
1145         if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1146                 /* skip white space */
1147                 while (cnt && isspace(ch)) {
1148                         ret = get_user(ch, ubuf++);
1149                         if (ret)
1150                                 goto out;
1151                         read++;
1152                         cnt--;
1153                 }
1154
1155                 if (isspace(ch)) {
1156                         file->f_pos += read;
1157                         ret = read;
1158                         goto out;
1159                 }
1160
1161                 iter->buffer_idx = 0;
1162         }
1163
1164         while (cnt && !isspace(ch)) {
1165                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1166                         iter->buffer[iter->buffer_idx++] = ch;
1167                 else {
1168                         ret = -EINVAL;
1169                         goto out;
1170                 }
1171                 ret = get_user(ch, ubuf++);
1172                 if (ret)
1173                         goto out;
1174                 read++;
1175                 cnt--;
1176         }
1177
1178         if (isspace(ch)) {
1179                 iter->filtered++;
1180                 iter->buffer[iter->buffer_idx] = 0;
1181                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1182                 iter->buffer_idx = 0;
1183         } else
1184                 iter->flags |= FTRACE_ITER_CONT;
1185
1186
1187         file->f_pos += read;
1188
1189         ret = read;
1190  out:
1191         mutex_unlock(&ftrace_regex_lock);
1192
1193         return ret;
1194 }
1195
1196 static ssize_t
1197 ftrace_filter_write(struct file *file, const char __user *ubuf,
1198                     size_t cnt, loff_t *ppos)
1199 {
1200         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1201 }
1202
1203 static ssize_t
1204 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1205                      size_t cnt, loff_t *ppos)
1206 {
1207         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1208 }
1209
1210 static void
1211 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1212 {
1213         if (unlikely(ftrace_disabled))
1214                 return;
1215
1216         mutex_lock(&ftrace_regex_lock);
1217         if (reset)
1218                 ftrace_filter_reset(enable);
1219         if (buf)
1220                 ftrace_match(buf, len, enable);
1221         mutex_unlock(&ftrace_regex_lock);
1222 }
1223
1224 /**
1225  * ftrace_set_filter - set a function to filter on in ftrace
1226  * @buf - the string that holds the function filter text.
1227  * @len - the length of the string.
1228  * @reset - non zero to reset all filters before applying this filter.
1229  *
1230  * Filters denote which functions should be enabled when tracing is enabled.
1231  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1232  */
1233 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1234 {
1235         ftrace_set_regex(buf, len, reset, 1);
1236 }
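/*
 * Usage sketch (illustrative): trace only the scheduler functions,
 * discarding any previously set filter:
 *
 *	ftrace_set_filter("sched*", strlen("sched*"), 1);
 */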
1237
1238 /**
1239  * ftrace_set_notrace - set a function to not trace in ftrace
1240  * @buf - the string that holds the function notrace text.
1241  * @len - the length of the string.
1242  * @reset - non zero to reset all filters before applying this filter.
1243  *
1244  * Notrace Filters denote which functions should not be enabled when tracing
1245  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1246  * for tracing.
1247  */
1248 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1249 {
1250         ftrace_set_regex(buf, len, reset, 0);
1251 }
1252
1253 static int
1254 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1255 {
1256         struct seq_file *m = (struct seq_file *)file->private_data;
1257         struct ftrace_iterator *iter;
1258
1259         mutex_lock(&ftrace_regex_lock);
1260         if (file->f_mode & FMODE_READ) {
1261                 iter = m->private;
1262
1263                 seq_release(inode, file);
1264         } else
1265                 iter = file->private_data;
1266
1267         if (iter->buffer_idx) {
1268                 iter->filtered++;
1269                 iter->buffer[iter->buffer_idx] = 0;
1270                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1271         }
1272
1273         mutex_lock(&ftrace_sysctl_lock);
1274         mutex_lock(&ftrace_start_lock);
1275         if (ftrace_start_up && ftrace_enabled)
1276                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1277         mutex_unlock(&ftrace_start_lock);
1278         mutex_unlock(&ftrace_sysctl_lock);
1279
1280         kfree(iter);
1281         mutex_unlock(&ftrace_regex_lock);
1282         return 0;
1283 }
1284
1285 static int
1286 ftrace_filter_release(struct inode *inode, struct file *file)
1287 {
1288         return ftrace_regex_release(inode, file, 1);
1289 }
1290
1291 static int
1292 ftrace_notrace_release(struct inode *inode, struct file *file)
1293 {
1294         return ftrace_regex_release(inode, file, 0);
1295 }
1296
1297 static const struct file_operations ftrace_avail_fops = {
1298         .open = ftrace_avail_open,
1299         .read = seq_read,
1300         .llseek = seq_lseek,
1301         .release = ftrace_avail_release,
1302 };
1303
1304 static const struct file_operations ftrace_failures_fops = {
1305         .open = ftrace_failures_open,
1306         .read = seq_read,
1307         .llseek = seq_lseek,
1308         .release = ftrace_avail_release,
1309 };
1310
1311 static const struct file_operations ftrace_filter_fops = {
1312         .open = ftrace_filter_open,
1313         .read = ftrace_regex_read,
1314         .write = ftrace_filter_write,
1315         .llseek = ftrace_regex_lseek,
1316         .release = ftrace_filter_release,
1317 };
1318
1319 static const struct file_operations ftrace_notrace_fops = {
1320         .open = ftrace_notrace_open,
1321         .read = ftrace_regex_read,
1322         .write = ftrace_notrace_write,
1323         .llseek = ftrace_regex_lseek,
1324         .release = ftrace_notrace_release,
1325 };
1326
1327 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1328
1329 static DEFINE_MUTEX(graph_lock);
1330
1331 int ftrace_graph_count;
1332 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1333
1334 static void *
1335 g_next(struct seq_file *m, void *v, loff_t *pos)
1336 {
1337         unsigned long *array = m->private;
1338         int index = *pos;
1339
1340         (*pos)++;
1341
1342         if (index >= ftrace_graph_count)
1343                 return NULL;
1344
1345         return &array[index];
1346 }
1347
1348 static void *g_start(struct seq_file *m, loff_t *pos)
1349 {
1350         void *p = NULL;
1351
1352         mutex_lock(&graph_lock);
1353
1354         p = g_next(m, p, pos);
1355
1356         return p;
1357 }
1358
1359 static void g_stop(struct seq_file *m, void *p)
1360 {
1361         mutex_unlock(&graph_lock);
1362 }
1363
1364 static int g_show(struct seq_file *m, void *v)
1365 {
1366         unsigned long *ptr = v;
1367         char str[KSYM_SYMBOL_LEN];
1368
1369         if (!ptr)
1370                 return 0;
1371
1372         kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1373
1374         seq_printf(m, "%s\n", str);
1375
1376         return 0;
1377 }
1378
1379 static const struct seq_operations ftrace_graph_seq_ops = {
1380         .start = g_start,
1381         .next = g_next,
1382         .stop = g_stop,
1383         .show = g_show,
1384 };
1385
1386 static int
1387 ftrace_graph_open(struct inode *inode, struct file *file)
1388 {
1389         int ret = 0;
1390
1391         if (unlikely(ftrace_disabled))
1392                 return -ENODEV;
1393
1394         mutex_lock(&graph_lock);
1395         if ((file->f_mode & FMODE_WRITE) &&
1396             !(file->f_flags & O_APPEND)) {
1397                 ftrace_graph_count = 0;
1398                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1399         }
1400
1401         if (file->f_mode & FMODE_READ) {
1402                 ret = seq_open(file, &ftrace_graph_seq_ops);
1403                 if (!ret) {
1404                         struct seq_file *m = file->private_data;
1405                         m->private = ftrace_graph_funcs;
1406                 }
1407         } else
1408                 file->private_data = ftrace_graph_funcs;
1409         mutex_unlock(&graph_lock);
1410
1411         return ret;
1412 }
1413
1414 static ssize_t
1415 ftrace_graph_read(struct file *file, char __user *ubuf,
1416                        size_t cnt, loff_t *ppos)
1417 {
1418         if (file->f_mode & FMODE_READ)
1419                 return seq_read(file, ubuf, cnt, ppos);
1420         else
1421                 return -EPERM;
1422 }
1423
1424 static int
1425 ftrace_set_func(unsigned long *array, int idx, char *buffer)
1426 {
1427         char str[KSYM_SYMBOL_LEN];
1428         struct dyn_ftrace *rec;
1429         struct ftrace_page *pg;
1430         int found = 0;
1431         int i, j;
1432
1433         if (ftrace_disabled)
1434                 return -ENODEV;
1435
1436         /* should not be called from interrupt context */
1437         spin_lock(&ftrace_lock);
1438
1439         for (pg = ftrace_pages_start; pg; pg = pg->next) {
1440                 for (i = 0; i < pg->index; i++) {
1441                         rec = &pg->records[i];
1442
1443                         if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1444                                 continue;
1445
1446                         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1447                         if (strcmp(str, buffer) == 0) {
1448                                 found = 1;
1449                                 for (j = 0; j < idx; j++)
1450                                         if (array[j] == rec->ip) {
1451                                                 found = 0;
1452                                                 break;
1453                                         }
1454                                 if (found)
1455                                         array[idx] = rec->ip;
1456                                 break;
1457                         }
1458                 }
1459         }
1460         spin_unlock(&ftrace_lock);
1461
1462         return found ? 0 : -EINVAL;
1463 }
1464
1465 static ssize_t
1466 ftrace_graph_write(struct file *file, const char __user *ubuf,
1467                    size_t cnt, loff_t *ppos)
1468 {
1469         unsigned char buffer[FTRACE_BUFF_MAX+1];
1470         unsigned long *array;
1471         size_t read = 0;
1472         ssize_t ret;
1473         int index = 0;
1474         char ch;
1475
1476         if (!cnt)  /* cnt is a size_t; it can never be negative */
1477                 return 0;
1478
1479         mutex_lock(&graph_lock);
1480
1481         if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1482                 ret = -EBUSY;
1483                 goto out;
1484         }
1485
1486         if (file->f_mode & FMODE_READ) {
1487                 struct seq_file *m = file->private_data;
1488                 array = m->private;
1489         } else
1490                 array = file->private_data;
1491
1492         ret = get_user(ch, ubuf++);
1493         if (ret)
1494                 goto out;
1495         read++;
1496         cnt--;
1497
1498         /* skip white space */
1499         while (cnt && isspace(ch)) {
1500                 ret = get_user(ch, ubuf++);
1501                 if (ret)
1502                         goto out;
1503                 read++;
1504                 cnt--;
1505         }
1506
1507         if (isspace(ch)) {
1508                 *ppos += read;
1509                 ret = read;
1510                 goto out;
1511         }
1512
1513         while (cnt && !isspace(ch)) {
1514                 if (index < FTRACE_BUFF_MAX)
1515                         buffer[index++] = ch;
1516                 else {
1517                         ret = -EINVAL;
1518                         goto out;
1519                 }
1520                 ret = get_user(ch, ubuf++);
1521                 if (ret)
1522                         goto out;
1523                 read++;
1524                 cnt--;
1525         }
1526         buffer[index] = 0;
1527
1528         /* only one function name may be added per write */
1529         ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1530         if (ret)
1531                 goto out;
1532
1533         ftrace_graph_count++;
1534
1535         file->f_pos += read;
1536
1537         ret = read;
1538  out:
1539         mutex_unlock(&graph_lock);
1540
1541         return ret;
1542 }
1543
1544 static const struct file_operations ftrace_graph_fops = {
1545         .open = ftrace_graph_open,
1546         .read = ftrace_graph_read,
1547         .write = ftrace_graph_write,
1548 };
1549 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1550
1551 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
1552 {
1553         struct dentry *entry;
1554
1555         entry = debugfs_create_file("available_filter_functions", 0444,
1556                                     d_tracer, NULL, &ftrace_avail_fops);
1557         if (!entry)
1558                 pr_warning("Could not create debugfs "
1559                            "'available_filter_functions' entry\n");
1560
1561         entry = debugfs_create_file("failures", 0444,
1562                                     d_tracer, NULL, &ftrace_failures_fops);
1563         if (!entry)
1564                 pr_warning("Could not create debugfs 'failures' entry\n");
1565
1566         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1567                                     NULL, &ftrace_filter_fops);
1568         if (!entry)
1569                 pr_warning("Could not create debugfs "
1570                            "'set_ftrace_filter' entry\n");
1571
1572         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1573                                     NULL, &ftrace_notrace_fops);
1574         if (!entry)
1575                 pr_warning("Could not create debugfs "
1576                            "'set_ftrace_notrace' entry\n");
1577
1578 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1579         entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
1580                                     NULL,
1581                                     &ftrace_graph_fops);
1582         if (!entry)
1583                 pr_warning("Could not create debugfs "
1584                            "'set_graph_function' entry\n");
1585 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1586
1587         return 0;
1588 }
1589
1590 static int ftrace_convert_nops(struct module *mod,
1591                                unsigned long *start,
1592                                unsigned long *end)
1593 {
1594         unsigned long *p;
1595         unsigned long addr;
1596         unsigned long flags;
1597
1598         mutex_lock(&ftrace_start_lock);
1599         p = start;
1600         while (p < end) {
1601                 addr = ftrace_call_adjust(*p++);
1602                 /*
1603                  * Some architecture linkers will pad between
1604                  * the different mcount_loc sections of different
1605                  * object files to satisfy alignments.
1606                  * Skip any NULL pointers.
1607                  */
1608                 if (!addr)
1609                         continue;
1610                 ftrace_record_ip(addr);
1611         }
1612
1613         /* disable interrupts to prevent kstop_machine */
1614         local_irq_save(flags);
1615         ftrace_update_code(mod);
1616         local_irq_restore(flags);
1617         mutex_unlock(&ftrace_start_lock);
1618
1619         return 0;
1620 }
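/*
 * The start..end array walked above is a table of call-site addresses
 * collected in the __mcount_loc section at build time: one entry per
 * 'call mcount' that gcc -pg emitted, for the core kernel or, via
 * ftrace_init_module(), for a module being loaded.
 */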
1621
1622 void ftrace_init_module(struct module *mod,
1623                         unsigned long *start, unsigned long *end)
1624 {
1625         if (ftrace_disabled || start == end)
1626                 return;
1627         ftrace_convert_nops(mod, start, end);
1628 }
1629
1630 extern unsigned long __start_mcount_loc[];
1631 extern unsigned long __stop_mcount_loc[];
1632
1633 void __init ftrace_init(void)
1634 {
1635         unsigned long count, addr, flags;
1636         int ret;
1637
1638         /* Keep the ftrace pointer to the stub */
1639         addr = (unsigned long)ftrace_stub;
1640
1641         local_irq_save(flags);
1642         ftrace_dyn_arch_init(&addr);
1643         local_irq_restore(flags);
1644
1645         /* ftrace_dyn_arch_init places the return code in addr */
1646         if (addr)
1647                 goto failed;
1648
1649         count = __stop_mcount_loc - __start_mcount_loc;
1650
1651         ret = ftrace_dyn_table_alloc(count);
1652         if (ret)
1653                 goto failed;
1654
1655         last_ftrace_enabled = ftrace_enabled = 1;
1656
1657         ret = ftrace_convert_nops(NULL,
1658                                   __start_mcount_loc,
1659                                   __stop_mcount_loc);
1660
1661         return;
1662  failed:
1663         ftrace_disabled = 1;
1664 }
1665
1666 #else
1667
1668 static int __init ftrace_nodyn_init(void)
1669 {
1670         ftrace_enabled = 1;
1671         return 0;
1672 }
1673 device_initcall(ftrace_nodyn_init);
1674
1675 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1676 static inline void ftrace_startup_enable(int command) { }
1677 /* Keep as macros so we do not need to define the commands */
1678 # define ftrace_startup(command)        do { } while (0)
1679 # define ftrace_shutdown(command)       do { } while (0)
1680 # define ftrace_startup_sysctl()        do { } while (0)
1681 # define ftrace_shutdown_sysctl()       do { } while (0)
1682 #endif /* CONFIG_DYNAMIC_FTRACE */
1683
1684 static ssize_t
1685 ftrace_pid_read(struct file *file, char __user *ubuf,
1686                        size_t cnt, loff_t *ppos)
1687 {
1688         char buf[64];
1689         int r;
1690
1691         if (ftrace_pid_trace == ftrace_swapper_pid)
1692                 r = sprintf(buf, "swapper tasks\n");
1693         else if (ftrace_pid_trace)
1694                 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
1695         else
1696                 r = sprintf(buf, "no pid\n");
1697
1698         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1699 }
1700
1701 static void clear_ftrace_swapper(void)
1702 {
1703         struct task_struct *p;
1704         int cpu;
1705
1706         get_online_cpus();
1707         for_each_online_cpu(cpu) {
1708                 p = idle_task(cpu);
1709                 clear_tsk_trace_trace(p);
1710         }
1711         put_online_cpus();
1712 }
1713
1714 static void set_ftrace_swapper(void)
1715 {
1716         struct task_struct *p;
1717         int cpu;
1718
1719         get_online_cpus();
1720         for_each_online_cpu(cpu) {
1721                 p = idle_task(cpu);
1722                 set_tsk_trace_trace(p);
1723         }
1724         put_online_cpus();
1725 }
1726
1727 static void clear_ftrace_pid(struct pid *pid)
1728 {
1729         struct task_struct *p;
1730
1731         do_each_pid_task(pid, PIDTYPE_PID, p) {
1732                 clear_tsk_trace_trace(p);
1733         } while_each_pid_task(pid, PIDTYPE_PID, p);
1734         put_pid(pid);
1735 }
1736
1737 static void set_ftrace_pid(struct pid *pid)
1738 {
1739         struct task_struct *p;
1740
1741         do_each_pid_task(pid, PIDTYPE_PID, p) {
1742                 set_tsk_trace_trace(p);
1743         } while_each_pid_task(pid, PIDTYPE_PID, p);
1744 }
1745
1746 static void clear_ftrace_pid_task(struct pid **pid)
1747 {
1748         if (*pid == ftrace_swapper_pid)
1749                 clear_ftrace_swapper();
1750         else
1751                 clear_ftrace_pid(*pid);
1752
1753         *pid = NULL;
1754 }
1755
1756 static void set_ftrace_pid_task(struct pid *pid)
1757 {
1758         if (pid == ftrace_swapper_pid)
1759                 set_ftrace_swapper();
1760         else
1761                 set_ftrace_pid(pid);
1762 }
1763
1764 static ssize_t
1765 ftrace_pid_write(struct file *filp, const char __user *ubuf,
1766                    size_t cnt, loff_t *ppos)
1767 {
1768         struct pid *pid;
1769         char buf[64];
1770         long val;
1771         int ret;
1772
1773         if (cnt >= sizeof(buf))
1774                 return -EINVAL;
1775
1776         if (copy_from_user(&buf, ubuf, cnt))
1777                 return -EFAULT;
1778
1779         buf[cnt] = 0;
1780
1781         ret = strict_strtol(buf, 10, &val);
1782         if (ret < 0)
1783                 return ret;
1784
1785         mutex_lock(&ftrace_start_lock);
1786         if (val < 0) {
1787                 /* disable pid tracing */
1788                 if (!ftrace_pid_trace)
1789                         goto out;
1790
1791                 clear_ftrace_pid_task(&ftrace_pid_trace);
1792
1793         } else {
1794                 /* swapper task is special */
1795                 if (!val) {
1796                         pid = ftrace_swapper_pid;
1797                         if (pid == ftrace_pid_trace)
1798                                 goto out;
1799                 } else {
1800                         pid = find_get_pid(val);
1801
1802                         if (pid == ftrace_pid_trace) {
1803                                 put_pid(pid);
1804                                 goto out;
1805                         }
1806                 }
1807
1808                 if (ftrace_pid_trace)
1809                         clear_ftrace_pid_task(&ftrace_pid_trace);
1810
1811                 if (!pid)
1812                         goto out;
1813
1814                 ftrace_pid_trace = pid;
1815
1816                 set_ftrace_pid_task(ftrace_pid_trace);
1817         }
1818
1819         /* update the function call */
1820         ftrace_update_pid_func();
1821         ftrace_startup_enable(0);
1822
1823  out:
1824         mutex_unlock(&ftrace_start_lock);
1825
1826         return cnt;
1827 }
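A usage sketch from userspace (the debugfs mount point is an assumption; /sys/kernel/debug is the common choice):

	/*
	 *   echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid  (trace only pid 123)
	 *   echo 0   > /sys/kernel/debug/tracing/set_ftrace_pid  (trace the idle tasks)
	 *   echo -1  > /sys/kernel/debug/tracing/set_ftrace_pid  (disable pid filtering)
	 *   cat /sys/kernel/debug/tracing/set_ftrace_pid
	 */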
1828
1829 static struct file_operations ftrace_pid_fops = {
1830         .read = ftrace_pid_read,
1831         .write = ftrace_pid_write,
1832 };
1833
1834 static __init int ftrace_init_debugfs(void)
1835 {
1836         struct dentry *d_tracer;
1837         struct dentry *entry;
1838
1839         d_tracer = tracing_init_dentry();
1840         if (!d_tracer)
1841                 return 0;
1842
1843         ftrace_init_dyn_debugfs(d_tracer);
1844
1845         entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1846                                     NULL, &ftrace_pid_fops);
1847         if (!entry)
1848                 pr_warning("Could not create debugfs "
1849                            "'set_ftrace_pid' entry\n");
1850         return 0;
1851 }
1852
1853 fs_initcall(ftrace_init_debugfs);
1854
1855 /**
1856  * ftrace_kill - kill ftrace
1857  *
1858  * This function should be used by panic code. It stops ftrace
1859  * in a blunt way, taking no locks, so it is safe to call from
1860  * atomic context; for a clean shutdown, use unregister_ftrace_function().
1861  */
1862 void ftrace_kill(void)
1863 {
1864         ftrace_disabled = 1;
1865         ftrace_enabled = 0;
1866         clear_ftrace_function();
1867 }
1868
1869 /**
1870  * register_ftrace_function - register a function for profiling
1871  * @ops: ops structure that holds the function for profiling.
1872  *
1873  * Register a function to be called on entry to every traceable
1874  * function in the kernel.
1875  *
1876  * Note: @ops->func and all the functions it calls must be labeled
1877  *       with "notrace", otherwise it will go into a
1878  *       recursive loop.
1879  */
1880 int register_ftrace_function(struct ftrace_ops *ops)
1881 {
1882         int ret;
1883
1884         if (unlikely(ftrace_disabled))
1885                 return -ENODEV;
1886
1887         mutex_lock(&ftrace_sysctl_lock);
1888
1889         ret = __register_ftrace_function(ops);
1890         ftrace_startup(0);
1891
1892         mutex_unlock(&ftrace_sysctl_lock);
1893         return ret;
1894 }
1895
1896 /**
1897  * unregister_ftrace_function - unregister a function for profiling.
1898  * @ops: ops structure that holds the function to unregister
1899  *
1900  * Unregister a function that was added to be called by ftrace profiling.
1901  */
1902 int unregister_ftrace_function(struct ftrace_ops *ops)
1903 {
1904         int ret;
1905
1906         mutex_lock(&ftrace_sysctl_lock);
1907         ret = __unregister_ftrace_function(ops);
1908         ftrace_shutdown(0);
1909         mutex_unlock(&ftrace_sysctl_lock);
1910
1911         return ret;
1912 }
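A minimal caller sketch (hypothetical module code; the names are invented for illustration) that honors the notrace requirement documented above:

	static void notrace my_callback(unsigned long ip, unsigned long parent_ip)
	{
		/* must not call anything traceable, or it will recurse */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
	};

	/* ...
	 * register_ftrace_function(&my_ops);
	 * ...do traced work...
	 * unregister_ftrace_function(&my_ops);
	 */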
1913
1914 int
1915 ftrace_enable_sysctl(struct ctl_table *table, int write,
1916                      struct file *file, void __user *buffer, size_t *lenp,
1917                      loff_t *ppos)
1918 {
1919         int ret;
1920
1921         if (unlikely(ftrace_disabled))
1922                 return -ENODEV;
1923
1924         mutex_lock(&ftrace_sysctl_lock);
1925
1926         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
1927
1928         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1929                 goto out;
1930
1931         last_ftrace_enabled = ftrace_enabled;
1932
1933         if (ftrace_enabled) {
1934
1935                 ftrace_startup_sysctl();
1936
1937                 /* we are starting ftrace again */
1938                 if (ftrace_list != &ftrace_list_end) {
1939                         if (ftrace_list->next == &ftrace_list_end)
1940                                 ftrace_trace_function = ftrace_list->func;
1941                         else
1942                                 ftrace_trace_function = ftrace_list_func;
1943                 }
1944
1945         } else {
1946                 /* stopping ftrace calls (just send to ftrace_stub) */
1947                 ftrace_trace_function = ftrace_stub;
1948
1949                 ftrace_shutdown_sysctl();
1950         }
1951
1952  out:
1953         mutex_unlock(&ftrace_sysctl_lock);
1954         return ret;
1955 }
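The handler above backs the kernel.ftrace_enabled sysctl. A usage sketch (assuming the usual procfs layout):

	/*
	 *   echo 0 > /proc/sys/kernel/ftrace_enabled  (send all calls to ftrace_stub)
	 *   echo 1 > /proc/sys/kernel/ftrace_enabled  (restore the registered chain)
	 */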
1956
1957 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1958
1959 static atomic_t ftrace_graph_active;
1960
1961 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1962 {
1963         return 0;
1964 }
1965
1966 /* The callbacks that hook a function */
1967 trace_func_graph_ret_t ftrace_graph_return =
1968                         (trace_func_graph_ret_t)ftrace_stub;
1969 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1970
1971 /* Try to assign a return stack to each task, FTRACE_RETSTACK_ALLOC_SIZE at a time. */
1972 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1973 {
1974         int i;
1975         int ret = 0;
1976         unsigned long flags;
1977         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1978         struct task_struct *g, *t;
1979
1980         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1981                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
1982                                         * sizeof(struct ftrace_ret_stack),
1983                                         GFP_KERNEL);
1984                 if (!ret_stack_list[i]) {
1985                         start = 0;
1986                         end = i;
1987                         ret = -ENOMEM;
1988                         goto free;
1989                 }
1990         }
1991
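	/*
	 * Hand the batch of stacks to threads that lack one; the read
	 * lock keeps the thread list stable while we walk it.  If the
	 * batch runs out (start == end) there are more threads than
	 * FTRACE_RETSTACK_ALLOC_SIZE, so return -EAGAIN and let the
	 * caller retry with a fresh batch.
	 */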
1992         read_lock_irqsave(&tasklist_lock, flags);
1993         do_each_thread(g, t) {
1994                 if (start == end) {
1995                         ret = -EAGAIN;
1996                         goto unlock;
1997                 }
1998
1999                 if (t->ret_stack == NULL) {
2000                         t->curr_ret_stack = -1;
2001                         /* Make sure IRQs see the -1 first: */
2002                         barrier();
2003                         t->ret_stack = ret_stack_list[start++];
2004                         atomic_set(&t->tracing_graph_pause, 0);
2005                         atomic_set(&t->trace_overrun, 0);
2006                 }
2007         } while_each_thread(g, t);
2008
2009 unlock:
2010         read_unlock_irqrestore(&tasklist_lock, flags);
2011 free:
2012         for (i = start; i < end; i++)
2013                 kfree(ret_stack_list[i]);
2014         return ret;
2015 }
2016
2017 /* Allocate a return stack for each task */
2018 static int start_graph_tracing(void)
2019 {
2020         struct ftrace_ret_stack **ret_stack_list;
2021         int ret;
2022
2023         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2024                                 sizeof(struct ftrace_ret_stack *),
2025                                 GFP_KERNEL);
2026
2027         if (!ret_stack_list)
2028                 return -ENOMEM;
2029
2030         do {
2031                 ret = alloc_retstack_tasklist(ret_stack_list);
2032         } while (ret == -EAGAIN);
2033
2034         kfree(ret_stack_list);
2035         return ret;
2036 }
2037
2038 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2039                         trace_func_graph_ent_t entryfunc)
2040 {
2041         int ret = 0;
2042
2043         mutex_lock(&ftrace_sysctl_lock);
2044
2045         atomic_inc(&ftrace_graph_active);
2046         ret = start_graph_tracing();
2047         if (ret) {
2048                 atomic_dec(&ftrace_graph_active);
2049                 goto out;
2050         }
2051
2052         ftrace_graph_return = retfunc;
2053         ftrace_graph_entry = entryfunc;
2054
2055         ftrace_startup(FTRACE_START_FUNC_RET);
2056
2057 out:
2058         mutex_unlock(&ftrace_sysctl_lock);
2059         return ret;
2060 }
2061
2062 void unregister_ftrace_graph(void)
2063 {
2064         mutex_lock(&ftrace_sysctl_lock);
2065
2066         atomic_dec(&ftrace_graph_active);
2067         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2068         ftrace_graph_entry = ftrace_graph_entry_stub;
2069         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2070
2071         mutex_unlock(&ftrace_sysctl_lock);
2072 }
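A caller sketch (hypothetical; the names are invented, and the entry callback's return convention, nonzero meaning "record this function", is an assumption inferred from the 0-returning stub above):

	static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
	{
		return 1;	/* assumed: nonzero records this entry/return pair */
	}

	static void notrace my_graph_return(struct ftrace_graph_ret *trace)
	{
	}

	/* ...
	 * register_ftrace_graph(my_graph_return, my_graph_entry);
	 * ...
	 * unregister_ftrace_graph();
	 */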
2073
2074 /* Allocate a return stack for newly created task */
2075 void ftrace_graph_init_task(struct task_struct *t)
2076 {
2077         if (atomic_read(&ftrace_graph_active)) {
2078                 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2079                                 * sizeof(struct ftrace_ret_stack),
2080                                 GFP_KERNEL);
2081                 if (!t->ret_stack)
2082                         return;
2083                 t->curr_ret_stack = -1;
2084                 atomic_set(&t->tracing_graph_pause, 0);
2085                 atomic_set(&t->trace_overrun, 0);
2086         } else
2087                 t->ret_stack = NULL;
2088 }
2089
2090 void ftrace_graph_exit_task(struct task_struct *t)
2091 {
2092         struct ftrace_ret_stack *ret_stack = t->ret_stack;
2093
2094         t->ret_stack = NULL;
2095         /* NULL must become visible to IRQs before we free it: */
2096         barrier();
2097
2098         kfree(ret_stack);
2099 }
2100
2101 void ftrace_graph_stop(void)
2102 {
2103         ftrace_stop();
2104 }
2105 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2106