tracing: Show sample std dev in function profiling
[linux-2.6.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/ftrace.h>
36 #include <asm/setup.h>
37
38 #include "trace_output.h"
39 #include "trace_stat.h"
40
/*
 * FTRACE_WARN_ON()/FTRACE_WARN_ON_ONCE() - warn, then shut ftrace down.
 *
 * If @cond is true, emit a WARN (once for the _ONCE variant) and call
 * ftrace_kill(): an anomaly here means live code patching can no longer
 * be trusted, so tracing is permanently disabled.
 */
#define FTRACE_WARN_ON(cond)                    \
        do {                                    \
                if (WARN_ON(cond))              \
                        ftrace_kill();          \
        } while (0)

#define FTRACE_WARN_ON_ONCE(cond)               \
        do {                                    \
                if (WARN_ON_ONCE(cond))         \
                        ftrace_kill();          \
        } while (0)
52
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
/* previous value of ftrace_enabled, used to detect sysctl transitions */
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;  /* link into ftrace_pids */
        struct pid *pid;        /* the pid being traced */
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* serializes registration/unregistration of ftrace_ops and state changes */
static DEFINE_MUTEX(ftrace_lock);

/* sentinel terminating the ftrace_ops list; its handler is a no-op stub */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func           = ftrace_stub,
};

/* head of the registered-ops list, and the live trace entry points */
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
88
89 /*
90  * Traverse the ftrace_list, invoking all entries.  The reason that we
91  * can use rcu_dereference_raw() is that elements removed from this list
92  * are simply leaked, so there is no need to interact with a grace-period
93  * mechanism.  The rcu_dereference_raw() calls are needed to handle
94  * concurrent insertions into the ftrace_list.
95  *
96  * Silly Alpha and silly pointer-speculation compiler optimizations!
97  */
98 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
99 {
100         struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
101
102         while (op != &ftrace_list_end) {
103                 op->func(ip, parent_ip);
104                 op = rcu_dereference_raw(op->next); /*see above*/
105         };
106 }
107
108 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
109 {
110         if (!test_tsk_trace_trace(current))
111                 return;
112
113         ftrace_pid_function(ip, parent_ip);
114 }
115
116 static void set_ftrace_pid_function(ftrace_func_t func)
117 {
118         /* do not set ftrace_pid_function to itself! */
119         if (func != ftrace_pid_func)
120                 ftrace_pid_function = func;
121 }
122
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag: no synchronization is performed here,
 * so in-flight callers may still run the old handlers briefly.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}
135
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        /* honour the quick-disable flag before dispatching */
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif
149
/*
 * Add @ops to the head of ftrace_list and, if tracing is enabled,
 * update the live trace entry point accordingly.
 * NOTE(review): presumably called with ftrace_lock held -- confirm
 * against the callers (lock is not taken here).
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        rcu_assign_pointer(ftrace_list, ops);

        if (ftrace_enabled) {
                ftrace_func_t func;

                /* single entry? call it directly, else walk the chain */
                if (ops->next == &ftrace_list_end)
                        func = ops->func;
                else
                        func = ftrace_list_func;

                /* pid filtering active: interpose the pid wrapper */
                if (!list_empty(&ftrace_pids)) {
                        set_ftrace_pid_function(func);
                        func = ftrace_pid_func;
                }

                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                ftrace_trace_function = func;
#else
                __ftrace_trace_function = func;
                ftrace_trace_function = ftrace_test_stop_func;
#endif
        }

        return 0;
}
188
/*
 * Remove @ops from ftrace_list and refresh the live trace entry point.
 * Returns 0 on success, -1 if @ops was not found on the list.
 * NOTE(review): presumably called with ftrace_lock held -- confirm.
 */
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                return 0;
        }

        /* find the link that points at ops so it can be spliced out */
        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list->next == &ftrace_list_end) {
                        ftrace_func_t func = ftrace_list->func;

                        if (!list_empty(&ftrace_pids)) {
                                set_ftrace_pid_function(func);
                                func = ftrace_pid_func;
                        }
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                        ftrace_trace_function = func;
#else
                        __ftrace_trace_function = func;
#endif
                }
        }

        return 0;
}
231
/*
 * Re-evaluate whether the pid-filter wrapper should be interposed,
 * after the set of traced pids has changed.  A no-op while tracing
 * is fully stopped (function is the stub).
 */
static void ftrace_update_pid_func(void)
{
        ftrace_func_t func;

        if (ftrace_trace_function == ftrace_stub)
                return;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        func = ftrace_trace_function;
#else
        func = __ftrace_trace_function;
#endif

        if (!list_empty(&ftrace_pids)) {
                /* pids selected: wrap the current function */
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        } else {
                /* no pids left: unwrap back to the saved function */
                if (func == ftrace_pid_func)
                        func = ftrace_pid_function;
        }

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
#else
        __ftrace_trace_function = func;
#endif
}
259
260 #ifdef CONFIG_FUNCTION_PROFILER
/* One per-function profile record */
struct ftrace_profile {
        struct hlist_node               node;    /* link in the stat hash */
        unsigned long                   ip;      /* function address */
        unsigned long                   counter; /* number of hits */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;         /* total time, ns */
        unsigned long long              time_squared; /* sum of time^2, for stddev */
#endif
};

/* Records are stored in page-sized chunks chained together */
struct ftrace_profile_page {
        struct ftrace_profile_page      *next;   /* next chunk in chain */
        unsigned long                   index;   /* records used in this chunk */
        struct ftrace_profile           records[];
};

/* Per-cpu profiling state */
struct ftrace_profile_stat {
        atomic_t                        disabled; /* recursion guard (NMIs) */
        struct hlist_head               *hash;    /* ip -> record lookup */
        struct ftrace_profile_page      *pages;   /* current allocation page */
        struct ftrace_profile_page      *start;   /* first page in chain */
        struct tracer_stat              stat;     /* stat-file registration */
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_bits __read_mostly;    /* log2 of hash size */
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
300
/*
 * Stat iterator: advance to the next profile record.  Record pages are
 * page-aligned, so the containing page is recovered by masking the
 * record address with PAGE_MASK.  Returns NULL at end of data.
 */
static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        /* past the used records of this page: move to the next page */
        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                /* presumably skips a never-hit leading record -- verify
                 * against record allocation order */
                if (!rec->counter)
                        goto again;
        }

        return rec;
}
324
325 static void *function_stat_start(struct tracer_stat *trace)
326 {
327         struct ftrace_profile_stat *stat =
328                 container_of(trace, struct ftrace_profile_stat, stat);
329
330         if (!stat || !stat->start)
331                 return NULL;
332
333         return function_stat_next(&stat->start->records[0], 0);
334 }
335
336 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
337 /* function graph compares on total time */
338 static int function_stat_cmp(void *p1, void *p2)
339 {
340         struct ftrace_profile *a = p1;
341         struct ftrace_profile *b = p2;
342
343         if (a->time < b->time)
344                 return -1;
345         if (a->time > b->time)
346                 return 1;
347         else
348                 return 0;
349 }
350 #else
351 /* not function graph compares against hits */
352 static int function_stat_cmp(void *p1, void *p2)
353 {
354         struct ftrace_profile *a = p1;
355         struct ftrace_profile *b = p2;
356
357         if (a->counter < b->counter)
358                 return -1;
359         if (a->counter > b->counter)
360                 return 1;
361         else
362                 return 0;
363 }
364 #endif
365
/* Emit the column headers for the per-cpu function stat file */
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_printf(m, "  Function                               "
                   "Hit    Time            Avg             s^2\n"
                      "  --------                               "
                   "---    ----            ---             ---\n");
#else
        seq_printf(m, "  Function                               Hit\n"
                      "  --------                               ---\n");
#endif
        return 0;
}
379
380 static int function_stat_show(struct seq_file *m, void *v)
381 {
382         struct ftrace_profile *rec = v;
383         char str[KSYM_SYMBOL_LEN];
384 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
385         static DEFINE_MUTEX(mutex);
386         static struct trace_seq s;
387         unsigned long long avg;
388         unsigned long long stddev;
389 #endif
390
391         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
392         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
393
394 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
395         seq_printf(m, "    ");
396         avg = rec->time;
397         do_div(avg, rec->counter);
398
399         /* Sample standard deviation (s^2) */
400         if (rec->counter <= 1)
401                 stddev = 0;
402         else {
403                 stddev = rec->time_squared - rec->counter * avg * avg;
404                 /*
405                  * Divide only 1000 for ns^2 -> us^2 conversion.
406                  * trace_print_graph_duration will divide 1000 again.
407                  */
408                 do_div(stddev, (rec->counter - 1) * 1000);
409         }
410
411         mutex_lock(&mutex);
412         trace_seq_init(&s);
413         trace_print_graph_duration(rec->time, &s);
414         trace_seq_puts(&s, "    ");
415         trace_print_graph_duration(avg, &s);
416         trace_seq_puts(&s, "    ");
417         trace_print_graph_duration(stddev, &s);
418         trace_print_seq(m, &s);
419         mutex_unlock(&mutex);
420 #endif
421         seq_putc(m, '\n');
422
423         return 0;
424 }
425
426 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
427 {
428         struct ftrace_profile_page *pg;
429
430         pg = stat->pages = stat->start;
431
432         while (pg) {
433                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
434                 pg->index = 0;
435                 pg = pg->next;
436         }
437
438         memset(stat->hash, 0,
439                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
440 }
441
442 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
443 {
444         struct ftrace_profile_page *pg;
445         int functions;
446         int pages;
447         int i;
448
449         /* If we already allocated, do nothing */
450         if (stat->pages)
451                 return 0;
452
453         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
454         if (!stat->pages)
455                 return -ENOMEM;
456
457 #ifdef CONFIG_DYNAMIC_FTRACE
458         functions = ftrace_update_tot_cnt;
459 #else
460         /*
461          * We do not know the number of functions that exist because
462          * dynamic tracing is what counts them. With past experience
463          * we have around 20K functions. That should be more than enough.
464          * It is highly unlikely we will execute every function in
465          * the kernel.
466          */
467         functions = 20000;
468 #endif
469
470         pg = stat->start = stat->pages;
471
472         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
473
474         for (i = 0; i < pages; i++) {
475                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
476                 if (!pg->next)
477                         goto out_free;
478                 pg = pg->next;
479         }
480
481         return 0;
482
483  out_free:
484         pg = stat->start;
485         while (pg) {
486                 unsigned long tmp = (unsigned long)pg;
487
488                 pg = pg->next;
489                 free_page(tmp);
490         }
491
492         free_page((unsigned long)stat->pages);
493         stat->pages = NULL;
494         stat->start = NULL;
495
496         return -ENOMEM;
497 }
498
/*
 * Allocate (or reset) the profiling state for @cpu: the lookup hash
 * and the preallocated record pages.  Returns 0 or -ENOMEM.
 */
static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        /* compute log2(size) once, shared by all cpus */
        if (!ftrace_profile_bits) {
                size--;

                for (; size; size >>= 1)
                        ftrace_profile_bits++;
        }

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}
539
540 static int ftrace_profile_init(void)
541 {
542         int cpu;
543         int ret = 0;
544
545         for_each_online_cpu(cpu) {
546                 ret = ftrace_profile_init_cpu(cpu);
547                 if (ret)
548                         break;
549         }
550
551         return ret;
552 }
553
/* interrupts must be disabled */
/*
 * Look up the profile record for @ip in @stat's hash.
 * Returns the record, or NULL if @ip has not been profiled yet.
 */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        struct hlist_node *n;
        unsigned long key;

        key = hash_long(ip, ftrace_profile_bits);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu(rec, n, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}
576
577 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
578                                struct ftrace_profile *rec)
579 {
580         unsigned long key;
581
582         key = hash_long(rec->ip, ftrace_profile_bits);
583         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
584 }
585
/*
 * The memory is already allocated, this simply finds a new record to use.
 * Returns the record for @ip (possibly created by a racing NMI), or
 * NULL if record space is exhausted or recursion was detected.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        /* current page full: advance to the next preallocated one */
        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}
621
/*
 * Function-entry profiling hook: bump the hit counter for @ip on this
 * cpu, allocating a record on first hit.  Runs with irqs disabled to
 * keep the per-cpu state consistent.
 */
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = &__get_cpu_var(ftrace_profile_stats);
        /* re-check enabled under irq-off in case of a concurrent disable */
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}
649
650 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Graph-entry hook: count a hit for the entered function */
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0);
        /* nonzero presumably means "trace this function's return" -- confirm
         * against register_ftrace_graph() */
        return 1;
}
656
/*
 * Graph-return hook: accumulate this call's duration (and its square,
 * for stddev) into the function's profile record.
 */
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = &__get_cpu_var(ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        calltime = trace->rettime - trace->calltime;

        /* without GRAPH_TIME, report self-time: subtract child call time */
        if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}
695
/* With the graph tracer, profile via entry/return graph hooks */
static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
706 #else
/* Without the graph tracer, profile via a plain function-trace ops */
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
        .func           = function_profile_call,
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
721 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
722
/*
 * debugfs write handler for "function_profile_enabled": parse a 0/1
 * value and enable or disable the profiler accordingly.
 * Returns the number of bytes consumed, or a negative errno.
 */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        char buf[64];           /* big enough to hold a number */
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        /* normalize to 0/1 */
        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler calls stop_machine
                         * so this acts like an synchronize_sched.
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        /* NOTE(review): on the error paths cnt holds a negative errno cast
         * to size_t, so this advances *ppos by a bogus amount -- verify */
        *ppos += cnt;

        return cnt;
}
776
/* debugfs read handler: report the current 0/1 enabled state */
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
787
/* file_operations for "function_profile_enabled" */
static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
};

/* used to initialize the real stat files */
/* template copied into each per-cpu stat; __initdata since it is only
 * read at boot-time setup */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};
803
/*
 * Boot-time setup: register a "function<cpu>" stat file per possible
 * cpu and create the "function_profile_enabled" control file.
 */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent, if something happens
                         * we still do not free memory.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = debugfs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'function_profile_enabled' entry\n");
}
846
847 #else /* CONFIG_FUNCTION_PROFILER */
/* Profiler disabled in config: nothing to create in debugfs */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
851 #endif /* CONFIG_FUNCTION_PROFILER */
852
/* sentinel pid meaning "trace the idle/swapper tasks" */
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/* hash of registered function probes, keyed by ip */
static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;   /* link in ftrace_func_hash */
        struct ftrace_probe_ops *ops;   /* probe callbacks */
        unsigned long           flags;
        unsigned long           ip;     /* address the probe is attached to */
        void                    *data;  /* probe-private data */
        struct rcu_head         rcu;    /* for deferred freeing */
};

/* commands passed to the ftrace modification engine */
enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
        FTRACE_START_FUNC_RET           = (1 << 5),
        FTRACE_STOP_FUNC_RET            = (1 << 6),
};

static int ftrace_filtered;     /* nonzero when a function filter is active */

/* newly discovered mcount sites, pending conversion */
static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

/* dyn_ftrace records are stored in page-sized chunks */
struct ftrace_page {
        struct ftrace_page      *next;  /* next chunk */
        int                     index;  /* records used in this chunk */
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

/* freelist of recycled dyn_ftrace records */
static struct dyn_ftrace *ftrace_free_records;
904
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

/* closes the two loops opened by do_for_each_ftrace_rec() */
#define while_for_each_ftrace_rec()             \
                }                               \
        }
918
/* Return @rec to the freelist and mark it free for later recycling */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->freelist = ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
925
/*
 * Get a dyn_ftrace record for @ip: reuse a freed record if available,
 * otherwise take the next slot in the current page (allocating a new
 * page when full).  Returns NULL on allocation failure or corruption.
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                /* a freelist entry without FL_FREE means corruption:
                 * warn and drop the whole freelist */
                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        FTRACE_WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        return NULL;
                }

                ftrace_free_records = rec->freelist;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next) {
                        /* allocate another page */
                        ftrace_pages->next =
                                (void *)get_zeroed_page(GFP_KERNEL);
                        if (!ftrace_pages->next)
                                return NULL;
                }
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}
958
/*
 * Record a newly discovered mcount call site at @ip and queue it on
 * the ftrace_new_addrs list for later conversion.  Returns the new
 * record, or NULL if ftrace is disabled or allocation fails.
 */
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;
        rec->newlist = ftrace_new_addrs;
        ftrace_new_addrs = rec;

        return rec;
}
977
/* Dump the instruction bytes at @p as colon-separated hex, after @fmt */
static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
987
/*
 * Report a code-modification failure at @ip.  @failed is the errno
 * from the arch patching code; each case also triggers the one-shot
 * warning that shuts ftrace down (see FTRACE_WARN_ON_ONCE).
 */
static void ftrace_bug(int failed, unsigned long ip)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                /* the bytes at ip did not match what was expected */
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}
1014
1015
/* Return 1 if the address range is reserved for ftrace */
int ftrace_text_reserved(void *start, void *end)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        /*
         * Each record reserves MCOUNT_INSN_SIZE bytes at rec->ip;
         * report any overlap with the inclusive [start, end] range.
         */
        do_for_each_ftrace_rec(pg, rec) {
                if (rec->ip <= (unsigned long)end &&
                    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
                        return 1;
        } while_for_each_ftrace_rec();
        return 0;
}
1029
1030
/*
 * Bring one record in line with the desired tracing state.  @enable
 * says whether tracing is being turned on.  Returns 0 when the record
 * already matches, otherwise the result of patching the site
 * (ftrace_make_call/ftrace_make_nop; negative errno on failure).
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        unsigned long flag = 0UL;

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        /*
         * If this record is not to be traced or we want to disable it,
         * then disable it.
         *
         * If we want to enable it and filtering is off, then enable it.
         *
         * If we want to enable it and filtering is on, enable it only if
         * it's filtered
         */
        if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
                if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
                        flag = FTRACE_FL_ENABLED;
        }

        /* If the state of this record hasn't changed, then do nothing */
        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
                return 0;

        if (flag) {
                rec->flags |= FTRACE_FL_ENABLED;
                return ftrace_make_call(rec, ftrace_addr);
        }

        rec->flags &= ~FTRACE_FL_ENABLED;
        return ftrace_make_nop(NULL, rec, ftrace_addr);
}
1065
/*
 * Walk every dyn_ftrace record and patch it to the state requested by
 * @enable.  A record that fails to patch is marked FTRACE_FL_FAILED,
 * reported via ftrace_bug(), and the walk stops immediately.
 */
static void ftrace_replace_code(int enable)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int failed;

        do_for_each_ftrace_rec(pg, rec) {
                /*
                 * Skip over free records, records that have
                 * failed and not converted.
                 */
                if (rec->flags & FTRACE_FL_FREE ||
                    rec->flags & FTRACE_FL_FAILED ||
                    !(rec->flags & FTRACE_FL_CONVERTED))
                        continue;

                failed = __ftrace_replace_code(rec, enable);
                if (failed) {
                        rec->flags |= FTRACE_FL_FAILED;
                        ftrace_bug(failed, rec->ip);
                        /* Stop processing */
                        return;
                }
        } while_for_each_ftrace_rec();
}
1091
1092 static int
1093 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1094 {
1095         unsigned long ip;
1096         int ret;
1097
1098         ip = rec->ip;
1099
1100         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1101         if (ret) {
1102                 ftrace_bug(ret, ip);
1103                 rec->flags |= FTRACE_FL_FAILED;
1104                 return 0;
1105         }
1106         return 1;
1107 }
1108
/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
        return 0;       /* default: nothing to prepare */
}
1117
/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
        return 0;       /* default: nothing to clean up */
}
1126
/*
 * Worker run under stop_machine() by ftrace_run_update_code().
 * @data points to the FTRACE_* command bitmask describing which
 * patching operations to apply.  Always returns 0.
 */
static int __ftrace_modify_code(void *data)
{
        int *command = data;

        /* enable/disable take precedence and are mutually exclusive */
        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
        else if (*command & FTRACE_STOP_FUNC_RET)
                ftrace_disable_ftrace_graph_caller();

        return 0;
}
1146
/*
 * Apply a FTRACE_* @command bitmask by running __ftrace_modify_code()
 * under stop_machine(), bracketed by the arch prepare/post hooks.
 * If the prepare hook fails, nothing is patched (and ftrace is killed
 * via FTRACE_WARN_ON).
 */
static void ftrace_run_update_code(int command)
{
        int ret;

        ret = ftrace_arch_code_modify_prepare();
        FTRACE_WARN_ON(ret);
        if (ret)
                return;

        stop_machine(__ftrace_modify_code, &command, NULL);

        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
}
1161
/* trace function last installed; used to detect a change on update */
static ftrace_func_t saved_ftrace_func;
/* nesting count of ftrace users; call sites stay enabled while > 0 */
static int ftrace_start_up;
1164
1165 static void ftrace_startup_enable(int command)
1166 {
1167         if (saved_ftrace_func != ftrace_trace_function) {
1168                 saved_ftrace_func = ftrace_trace_function;
1169                 command |= FTRACE_UPDATE_TRACE_FUNC;
1170         }
1171
1172         if (!command || !ftrace_enabled)
1173                 return;
1174
1175         ftrace_run_update_code(command);
1176 }
1177
1178 static void ftrace_startup(int command)
1179 {
1180         if (unlikely(ftrace_disabled))
1181                 return;
1182
1183         ftrace_start_up++;
1184         command |= FTRACE_ENABLE_CALLS;
1185
1186         ftrace_startup_enable(command);
1187 }
1188
/*
 * Drop one ftrace user.  When the last user goes away the call sites
 * are patched back to NOPs; a changed trace function is also picked up
 * so the final update installs it.
 */
static void ftrace_shutdown(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        ftrace_start_up--;
        /*
         * Just warn in case of unbalance, no need to kill ftrace, it's not
         * critical but the ftrace_call callers may be never nopped again after
         * further ftrace uses.
         */
        WARN_ON_ONCE(ftrace_start_up < 0);

        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                return;

        ftrace_run_update_code(command);
}
1215
1216 static void ftrace_startup_sysctl(void)
1217 {
1218         int command = FTRACE_ENABLE_MCOUNT;
1219
1220         if (unlikely(ftrace_disabled))
1221                 return;
1222
1223         /* Force update next time */
1224         saved_ftrace_func = NULL;
1225         /* ftrace_start_up is true if we want ftrace running */
1226         if (ftrace_start_up)
1227                 command |= FTRACE_ENABLE_CALLS;
1228
1229         ftrace_run_update_code(command);
1230 }
1231
1232 static void ftrace_shutdown_sysctl(void)
1233 {
1234         int command = FTRACE_DISABLE_MCOUNT;
1235
1236         if (unlikely(ftrace_disabled))
1237                 return;
1238
1239         /* ftrace_start_up is true if ftrace is running */
1240         if (ftrace_start_up)
1241                 command |= FTRACE_DISABLE_CALLS;
1242
1243         ftrace_run_update_code(command);
1244 }
1245
static cycle_t          ftrace_update_time;     /* duration of the last update pass */
static unsigned long    ftrace_update_cnt;      /* records converted in the last pass */
unsigned long           ftrace_update_tot_cnt;  /* records converted since boot */
1249
/*
 * Drain the ftrace_new_addrs list: convert each newly recorded mcount
 * call site to a NOP, mark it converted, and — if tracing is already
 * running — enable it right away.  Records that fail conversion are
 * freed.  Updates the ftrace_update_* statistics.
 * Returns 0, or -1 if ftrace was disabled mid-way.
 */
static int ftrace_update_code(struct module *mod)
{
        struct dyn_ftrace *p;
        cycle_t start, stop;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        while (ftrace_new_addrs) {

                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;

                p = ftrace_new_addrs;
                ftrace_new_addrs = p->newlist;
                p->flags = 0L;

                /*
                 * Do the initial record convertion from mcount jump
                 * to the NOP instructions.
                 */
                if (!ftrace_code_disable(mod, p)) {
                        ftrace_free_rec(p);
                        continue;
                }

                p->flags |= FTRACE_FL_CONVERTED;
                ftrace_update_cnt++;

                /*
                 * If the tracing is enabled, go ahead and enable the record.
                 *
                 * The reason not to enable the record immediatelly is the
                 * inherent check of ftrace_make_nop/ftrace_make_call for
                 * correct previous instructions.  Making first the NOP
                 * conversion puts the module to the correct state, thus
                 * passing the ftrace_make_call check.
                 */
                if (ftrace_start_up) {
                        int failed = __ftrace_replace_code(p, 1);
                        if (failed) {
                                ftrace_bug(failed, p->ip);
                                ftrace_free_rec(p);
                        }
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        return 0;
}
1304
/*
 * Boot-time allocation of the initial dyn_ftrace page list, sized for
 * @num_to_init entries.  Failing to extend the list past the first
 * page is not fatal; ftrace_alloc_dyn_node() can add pages on demand.
 * Returns 0 on success, -1 if even the first page cannot be allocated.
 */
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        /* cnt extra pages + the initial one hold at least num_to_init entries */
        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld entries in %d pages\n",
                num_to_init, cnt + 1);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}
1348
/* Iterator view flags for the available/failures/filter/notrace files */
enum {
        FTRACE_ITER_FILTER      = (1 << 0),     /* reading set_ftrace_filter */
        FTRACE_ITER_NOTRACE     = (1 << 1),     /* reading set_ftrace_notrace */
        FTRACE_ITER_FAILURES    = (1 << 2),     /* show only FAILED records */
        FTRACE_ITER_PRINTALL    = (1 << 3),     /* no filter set: print banner only */
        FTRACE_ITER_HASH        = (1 << 4),     /* iterating the probe hash */
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/* Per-open-file cursor for walking the record pages and probe hash */
struct ftrace_iterator {
        struct ftrace_page      *pg;            /* current page of records */
        int                     hidx;           /* current probe hash bucket */
        int                     idx;            /* next record index within pg */
        unsigned                flags;          /* FTRACE_ITER_* */
        struct trace_parser     parser;         /* buffers partial writes */
};
1366
/*
 * seq_file ->next for the probe hash: advance to the next hlist node,
 * moving across hash buckets and skipping empty ones.  Returns NULL
 * once every bucket is exhausted.
 */
static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct hlist_node *hnd = v;
        struct hlist_head *hhd;

        WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

        (*pos)++;

 retry:
        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
                return NULL;

        hhd = &ftrace_func_hash[iter->hidx];

        /* skip empty buckets */
        if (hlist_empty(hhd)) {
                iter->hidx++;
                hnd = NULL;
                goto retry;
        }

        /* NULL cursor means we just entered this bucket: start at its head */
        if (!hnd)
                hnd = hhd->first;
        else {
                hnd = hnd->next;
                if (!hnd) {
                        iter->hidx++;
                        goto retry;
                }
        }

        return hnd;
}
1402
/*
 * seq_file ->start for the probe hash portion of the output.  On the
 * transition into hash mode *pos is rebased to 0, so positions already
 * consumed by the record listing are not counted against the hash walk.
 */
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l;

        if (!(iter->flags & FTRACE_ITER_HASH))
                *pos = 0;

        iter->flags |= FTRACE_ITER_HASH;

        /* replay t_hash_next() from the first bucket up to *pos */
        iter->hidx = 0;
        for (l = 0; l <= *pos; ) {
                p = t_hash_next(m, p, &l);
                if (!p)
                        break;
        }
        return p;
}
1422
1423 static int t_hash_show(struct seq_file *m, void *v)
1424 {
1425         struct ftrace_func_probe *rec;
1426         struct hlist_node *hnd = v;
1427
1428         rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1429
1430         if (rec->ops->print)
1431                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1432
1433         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1434
1435         if (rec->data)
1436                 seq_printf(m, ":%p", rec->data);
1437         seq_putc(m, '\n');
1438
1439         return 0;
1440 }
1441
/*
 * seq_file ->next for the function listing.  Hands off to the hash
 * iterator when in hash mode; otherwise returns the next record that
 * is visible in the view selected by iter->flags (failures-only,
 * filter, notrace), skipping free records.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_next(m, v, pos);

        (*pos)++;

        /* in PRINTALL mode there is only the single banner line */
        if (iter->flags & FTRACE_ITER_PRINTALL)
                return NULL;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                /* skip records hidden from this view */
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        return rec;
}
1485
/*
 * seq_file ->start: takes ftrace_lock (released in t_stop) and
 * repositions the iterator at *pos.  Reading set_ftrace_filter with
 * filtering off short-cuts to a single banner line; once the record
 * listing is exhausted, the filter file falls through to the probe
 * hash.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l;

        mutex_lock(&ftrace_lock);
        /*
         * For set_ftrace_filter reading, if we have the filter
         * off, we can short cut and just print out that all
         * functions are enabled.
         */
        if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
                return iter;
        }

        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_start(m, pos);

        /* replay t_next() from the first page up to *pos */
        iter->pg = ftrace_pages_start;
        iter->idx = 0;
        for (l = 0; l <= *pos; ) {
                p = t_next(m, p, &l);
                if (!p)
                        break;
        }

        if (!p && iter->flags & FTRACE_ITER_FILTER)
                return t_hash_start(m, pos);

        return p;
}
1521
/* seq_file ->stop: drop the lock taken in t_start() */
static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&ftrace_lock);
}
1526
1527 static int t_show(struct seq_file *m, void *v)
1528 {
1529         struct ftrace_iterator *iter = m->private;
1530         struct dyn_ftrace *rec = v;
1531
1532         if (iter->flags & FTRACE_ITER_HASH)
1533                 return t_hash_show(m, v);
1534
1535         if (iter->flags & FTRACE_ITER_PRINTALL) {
1536                 seq_printf(m, "#### all functions enabled ####\n");
1537                 return 0;
1538         }
1539
1540         if (!rec)
1541                 return 0;
1542
1543         seq_printf(m, "%ps\n", (void *)rec->ip);
1544
1545         return 0;
1546 }
1547
/* seq_file operations shared by the available/failures/filter/notrace files */
static const struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};
1554
1555 static int
1556 ftrace_avail_open(struct inode *inode, struct file *file)
1557 {
1558         struct ftrace_iterator *iter;
1559         int ret;
1560
1561         if (unlikely(ftrace_disabled))
1562                 return -ENODEV;
1563
1564         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1565         if (!iter)
1566                 return -ENOMEM;
1567
1568         iter->pg = ftrace_pages_start;
1569
1570         ret = seq_open(file, &show_ftrace_seq_ops);
1571         if (!ret) {
1572                 struct seq_file *m = file->private_data;
1573
1574                 m->private = iter;
1575         } else {
1576                 kfree(iter);
1577         }
1578
1579         return ret;
1580 }
1581
1582 static int
1583 ftrace_failures_open(struct inode *inode, struct file *file)
1584 {
1585         int ret;
1586         struct seq_file *m;
1587         struct ftrace_iterator *iter;
1588
1589         ret = ftrace_avail_open(inode, file);
1590         if (!ret) {
1591                 m = (struct seq_file *)file->private_data;
1592                 iter = (struct ftrace_iterator *)m->private;
1593                 iter->flags = FTRACE_ITER_FAILURES;
1594         }
1595
1596         return ret;
1597 }
1598
1599
/*
 * Clear the FILTER (@enable == 1) or NOTRACE flag from every
 * non-failed record.  Clearing the filter also turns ftrace_filtered
 * off, so all functions are traced again.
 */
static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

        mutex_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        do_for_each_ftrace_rec(pg, rec) {
                if (rec->flags & FTRACE_FL_FAILED)
                        continue;
                rec->flags &= ~type;
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
}
1616
/*
 * Common open for set_ftrace_filter (@enable == 1) and
 * set_ftrace_notrace.  Opening a writer with O_TRUNC resets the
 * corresponding flags.  Readers get a seq_file whose private data is
 * the ftrace_iterator; pure writers keep the iterator (with its
 * parser for buffering partial writes) directly in
 * file->private_data.
 */
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
                kfree(iter);
                return -ENOMEM;
        }

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else {
                        /* seq_open failed: the iterator has no owner, free it */
                        trace_parser_put(&iter->parser);
                        kfree(iter);
                }
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
1659
/* Open set_ftrace_filter: enable == 1 selects the filter flag set */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}
1665
/* Open set_ftrace_notrace: enable == 0 selects the notrace flag set */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}
1671
1672 static loff_t
1673 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1674 {
1675         loff_t ret;
1676
1677         if (file->f_mode & FMODE_READ)
1678                 ret = seq_lseek(file, offset, origin);
1679         else
1680                 file->f_pos = ret = 1;
1681
1682         return ret;
1683 }
1684
1685 static int ftrace_match(char *str, char *regex, int len, int type)
1686 {
1687         int matched = 0;
1688         int slen;
1689
1690         switch (type) {
1691         case MATCH_FULL:
1692                 if (strcmp(str, regex) == 0)
1693                         matched = 1;
1694                 break;
1695         case MATCH_FRONT_ONLY:
1696                 if (strncmp(str, regex, len) == 0)
1697                         matched = 1;
1698                 break;
1699         case MATCH_MIDDLE_ONLY:
1700                 if (strstr(str, regex))
1701                         matched = 1;
1702                 break;
1703         case MATCH_END_ONLY:
1704                 slen = strlen(str);
1705                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
1706                         matched = 1;
1707                 break;
1708         }
1709
1710         return matched;
1711 }
1712
/*
 * Match one record against a parsed pattern by resolving rec->ip to
 * its symbol name.  Returns 1 on a match, 0 otherwise.
 */
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        return ftrace_match(str, regex, len, type);
}
1721
/*
 * Set (or, for a '!' pattern, clear) the FILTER/NOTRACE flag on every
 * non-failed record whose symbol matches @buff.  @enable selects the
 * filter flag set; a successful filter match also turns
 * ftrace_filtered on.  Returns 1 if anything matched, 0 otherwise.
 */
static int ftrace_match_records(char *buff, int len, int enable)
{
        unsigned int search_len;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long flag;
        char *search;
        int type;
        int not;
        int found = 0;

        flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        type = filter_parse_regex(buff, len, &search, &not);

        search_len = strlen(search);

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {

                if (rec->flags & FTRACE_FL_FAILED)
                        continue;

                if (ftrace_match_record(rec, search, search_len, type)) {
                        if (not)
                                rec->flags &= ~flag;
                        else
                                rec->flags |= flag;
                        found = 1;
                }
                /*
                 * Only enable filtering if we have a function that
                 * is filtered on.
                 */
                if (enable && (rec->flags & FTRACE_FL_FILTER))
                        ftrace_filtered = 1;
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);

        return found;
}
1762
1763 static int
1764 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1765                            char *regex, int len, int type)
1766 {
1767         char str[KSYM_SYMBOL_LEN];
1768         char *modname;
1769
1770         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1771
1772         if (!modname || strcmp(modname, mod))
1773                 return 0;
1774
1775         /* blank search means to match all funcs in the mod */
1776         if (len)
1777                 return ftrace_match(str, regex, len, type);
1778         else
1779                 return 1;
1780 }
1781
/*
 * Like ftrace_match_records(), but restricted to functions belonging
 * to module @mod.  A blank or "*" pattern matches every function in
 * the module; "!" or "!*" clears the flag for the whole module.
 * Returns 1 if anything matched, 0 otherwise.
 */
static int ftrace_match_module_records(char *buff, char *mod, int enable)
{
        unsigned search_len = 0;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        char *search = buff;
        unsigned long flag;
        int not = 0;
        int found = 0;

        flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

        /* blank or '*' mean the same */
        if (strcmp(buff, "*") == 0)
                buff[0] = 0;

        /* handle the case of 'dont filter this module' */
        if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
                buff[0] = 0;
                not = 1;
        }

        if (strlen(buff)) {
                type = filter_parse_regex(buff, strlen(buff), &search, &not);
                search_len = strlen(search);
        }

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {

                if (rec->flags & FTRACE_FL_FAILED)
                        continue;

                if (ftrace_match_module_record(rec, mod,
                                               search, search_len, type)) {
                        if (not)
                                rec->flags &= ~flag;
                        else
                                rec->flags |= flag;
                        found = 1;
                }
                /* filtering is active once any record carries the filter flag */
                if (enable && (rec->flags & FTRACE_FL_FILTER))
                        ftrace_filtered = 1;

        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);

        return found;
}
1832
/*
 * We register the module command as a template to show others how
 * to register the a command as well.
 */

/*
 * Handler for "<func>:mod:<module>" written to the filter files.
 * @param is the text after the command name ("<module>[:...]").
 * Returns 0 on any match, -EINVAL on a missing/empty module name or
 * when nothing matched.
 */
static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
        char *mod;

        /*
         * cmd == 'mod' because we only registered this func
         * for the 'mod' ftrace_func_command.
         * But if you register one func with multiple commands,
         * you can tell which command was used by the cmd
         * parameter.
         */

        /* we must have a module name */
        if (!param)
                return -EINVAL;

        mod = strsep(&param, ":");
        if (!strlen(mod))
                return -EINVAL;

        if (ftrace_match_module_records(func, mod, enable))
                return 0;
        return -EINVAL;
}
1863
/* The "mod" command: limit filter matching to one module's functions */
static struct ftrace_func_command ftrace_mod_cmd = {
        .name                   = "mod",
        .func                   = ftrace_mod_callback,
};

/* Register the "mod" command at boot */
static int __init ftrace_mod_cmd_init(void)
{
        return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
1874
/*
 * trace_probe_ops handler: called for every traced @ip, it looks up
 * the probe hash bucket for @ip and runs the handler of each probe
 * registered on that exact address.
 */
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_func_probe *entry;
        struct hlist_head *hhd;
        struct hlist_node *n;
        unsigned long key;
        int resched;

        key = hash_long(ip, FTRACE_HASH_BITS);

        hhd = &ftrace_func_hash[key];

        if (hlist_empty(hhd))
                return;

        /*
         * Disable preemption for these calls to prevent a RCU grace
         * period. This syncs the hash iteration and freeing of items
         * on the hash. rcu_read_lock is too dangerous here.
         */
        resched = ftrace_preempt_disable();
        hlist_for_each_entry_rcu(entry, n, hhd, node) {
                if (entry->ip == ip)
                        entry->ops->func(ip, parent_ip, &entry->data);
        }
        ftrace_preempt_enable(resched);
}
1903
/* ftrace_ops that dispatches all registered function probes */
static struct ftrace_ops trace_probe_ops __read_mostly =
{
        .func           = function_trace_probe_call,
};

/* set while trace_probe_ops is registered with ftrace */
static int ftrace_probe_registered;
1910
1911 static void __enable_ftrace_function_probe(void)
1912 {
1913         int i;
1914
1915         if (ftrace_probe_registered)
1916                 return;
1917
1918         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1919                 struct hlist_head *hhd = &ftrace_func_hash[i];
1920                 if (hhd->first)
1921                         break;
1922         }
1923         /* Nothing registered? */
1924         if (i == FTRACE_FUNC_HASHSIZE)
1925                 return;
1926
1927         __register_ftrace_function(&trace_probe_ops);
1928         ftrace_startup(0);
1929         ftrace_probe_registered = 1;
1930 }
1931
1932 static void __disable_ftrace_function_probe(void)
1933 {
1934         int i;
1935
1936         if (!ftrace_probe_registered)
1937                 return;
1938
1939         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1940                 struct hlist_head *hhd = &ftrace_func_hash[i];
1941                 if (hhd->first)
1942                         return;
1943         }
1944
1945         /* no more funcs left */
1946         __unregister_ftrace_function(&trace_probe_ops);
1947         ftrace_shutdown(0);
1948         ftrace_probe_registered = 0;
1949 }
1950
1951
1952 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1953 {
1954         struct ftrace_func_probe *entry =
1955                 container_of(rhp, struct ftrace_func_probe, rcu);
1956
1957         if (entry->ops->free)
1958                 entry->ops->free(&entry->data);
1959         kfree(entry);
1960 }
1961
1962
/*
 * Attach probe @ops (with initial @data) to every function matching
 * @glob, hashing one entry per matched ip, then make sure the probe
 * dispatcher is running.  '!' patterns are rejected with -EINVAL.
 * Returns the number of matched functions, or -ENOMEM if allocation
 * failed before anything was installed.
 */
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
{
        struct ftrace_func_probe *entry;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type, len, not;
        unsigned long key;
        int count = 0;
        char *search;

        type = filter_parse_regex(glob, strlen(glob), &search, &not);
        len = strlen(search);

        /* we do not support '!' for function probes */
        if (WARN_ON(not))
                return -EINVAL;

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {

                if (rec->flags & FTRACE_FL_FAILED)
                        continue;

                if (!ftrace_match_record(rec, search, len, type))
                        continue;

                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        /* If we did not process any, then return error */
                        if (!count)
                                count = -ENOMEM;
                        goto out_unlock;
                }

                /*
                 * NOTE(review): count is bumped before the callback below
                 * gets a chance to reject this function, so rejected
                 * matches are still counted — confirm callers expect that.
                 */
                count++;

                entry->data = data;

                /*
                 * The caller might want to do something special
                 * for each function we find. We call the callback
                 * to give the caller an opportunity to do so.
                 */
                if (ops->callback) {
                        if (ops->callback(rec->ip, &entry->data) < 0) {
                                /* caller does not like this func */
                                kfree(entry);
                                continue;
                        }
                }

                entry->ops = ops;
                entry->ip = rec->ip;

                key = hash_long(entry->ip, FTRACE_HASH_BITS);
                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

        } while_for_each_ftrace_rec();
        __enable_ftrace_function_probe();

 out_unlock:
        mutex_unlock(&ftrace_lock);

        return count;
}
2030
/* Flags telling __unregister_ftrace_function_probe() what to match on. */
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};
2035
/*
 * Remove function probes matching @glob (NULL or "*" matches all),
 * additionally filtered by ops and/or data according to @flags.
 * Freed entries are released via RCU so concurrent probe handlers
 * can finish walking the hash safely.
 */
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	/* An empty glob or "*" means "match every probe". */
	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}
2088
/* Remove probes matching @glob that use both this @ops and this @data. */
void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}
2096
/* Remove probes matching @glob that use this @ops, regardless of data. */
void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}
2102
/* Remove every probe matching @glob, regardless of ops or data. */
void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
2107
/* Registered "func:command" handlers and the mutex protecting the list. */
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);
2110
2111 int register_ftrace_command(struct ftrace_func_command *cmd)
2112 {
2113         struct ftrace_func_command *p;
2114         int ret = 0;
2115
2116         mutex_lock(&ftrace_cmd_mutex);
2117         list_for_each_entry(p, &ftrace_commands, list) {
2118                 if (strcmp(cmd->name, p->name) == 0) {
2119                         ret = -EBUSY;
2120                         goto out_unlock;
2121                 }
2122         }
2123         list_add(&cmd->list, &ftrace_commands);
2124  out_unlock:
2125         mutex_unlock(&ftrace_cmd_mutex);
2126
2127         return ret;
2128 }
2129
2130 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2131 {
2132         struct ftrace_func_command *p, *n;
2133         int ret = -ENODEV;
2134
2135         mutex_lock(&ftrace_cmd_mutex);
2136         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2137                 if (strcmp(cmd->name, p->name) == 0) {
2138                         ret = 0;
2139                         list_del_init(&p->list);
2140                         goto out_unlock;
2141                 }
2142         }
2143  out_unlock:
2144         mutex_unlock(&ftrace_cmd_mutex);
2145
2146         return ret;
2147 }
2148
2149 static int ftrace_process_regex(char *buff, int len, int enable)
2150 {
2151         char *func, *command, *next = buff;
2152         struct ftrace_func_command *p;
2153         int ret = -EINVAL;
2154
2155         func = strsep(&next, ":");
2156
2157         if (!next) {
2158                 if (ftrace_match_records(func, len, enable))
2159                         return 0;
2160                 return ret;
2161         }
2162
2163         /* command found */
2164
2165         command = strsep(&next, ":");
2166
2167         mutex_lock(&ftrace_cmd_mutex);
2168         list_for_each_entry(p, &ftrace_commands, list) {
2169                 if (strcmp(p->name, command) == 0) {
2170                         ret = p->func(func, command, next, enable);
2171                         goto out_unlock;
2172                 }
2173         }
2174  out_unlock:
2175         mutex_unlock(&ftrace_cmd_mutex);
2176
2177         return ret;
2178 }
2179
/*
 * Common write handler for set_ftrace_filter / set_ftrace_notrace.
 * Accumulates user input in the iterator's parser and applies each
 * completed expression; @enable selects filter (1) vs notrace (0).
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	/* Read-capable opens wrap the iterator in a seq_file. */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	/* Only process once a full (non-continued) expression is buffered. */
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret)
			goto out_unlock;
	}

	ret = read;
out_unlock:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
2217
/* Write handler for set_ftrace_filter (enable = 1: add to trace filter). */
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}
2224
/* Write handler for set_ftrace_notrace (enable = 0: exclude from tracing). */
static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
2231
/*
 * Common helper for ftrace_set_filter()/ftrace_set_notrace(): optionally
 * reset the current filter, then apply the expression in @buf (if any).
 */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
2245
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
2259
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
2274
/*
 * command line interface to allow users to set filters on boot up.
 * The buffers are __initdata: consumed once by set_ftrace_early_filters()
 * and then discarded with the rest of init memory.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2281
2282 static int __init set_ftrace_notrace(char *str)
2283 {
2284         strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2285         return 1;
2286 }
2287 __setup("ftrace_notrace=", set_ftrace_notrace);
2288
2289 static int __init set_ftrace_filter(char *str)
2290 {
2291         strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2292         return 1;
2293 }
2294 __setup("ftrace_filter=", set_ftrace_filter);
2295
2296 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Boot-time buffer for "ftrace_graph_filter="; applied via ftrace_set_func(). */
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2299
/* Save the "ftrace_graph_filter=" boot parameter for later processing. */
static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
2306
/*
 * Apply the boot-time graph filter: split the saved command line buffer
 * on commas and feed each expression to ftrace_set_func().
 */
static void __init set_ftrace_early_graph(char *buf)
{
	int ret;
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
2322 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2323
2324 static void __init set_ftrace_early_filter(char *buf, int enable)
2325 {
2326         char *func;
2327
2328         while (buf) {
2329                 func = strsep(&buf, ",");
2330                 ftrace_set_regex(func, strlen(func), 0, enable);
2331         }
2332 }
2333
/* Apply any filters saved from the kernel command line during boot. */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
2345
/*
 * Common release handler for set_ftrace_filter / set_ftrace_notrace.
 * Flushes any partially-parsed expression left in the iterator's
 * parser, re-runs the update code if tracing is live, and frees the
 * iterator.  @enable selects filter (1) vs notrace (0).
 */
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct trace_parser *parser;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* Apply whatever is still buffered (write without trailing newline). */
	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(parser->buffer, parser->idx, enable);
	}

	/* Propagate the new filter to the live call sites. */
	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	trace_parser_put(parser);
	kfree(iter);

	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
2378
/* Release handler for set_ftrace_filter. */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}
2384
/* Release handler for set_ftrace_notrace. */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
2390
/* File ops for available_filter_functions (read-only listing). */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
2397
/* File ops for the failures file (records that failed to be converted). */
static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
2404
/* File ops for set_ftrace_filter (read/write). */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};
2412
/* File ops for set_ftrace_notrace (read/write). */
static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
2420
2421 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2422
/* Protects the graph filter state below. */
static DEFINE_MUTEX(graph_lock);

/* Number of entries in ftrace_graph_funcs and whether the filter is active. */
int ftrace_graph_count;
int ftrace_graph_filter_enabled;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2428
2429 static void *
2430 __g_next(struct seq_file *m, loff_t *pos)
2431 {
2432         if (*pos >= ftrace_graph_count)
2433                 return NULL;
2434         return &ftrace_graph_funcs[*pos];
2435 }
2436
2437 static void *
2438 g_next(struct seq_file *m, void *v, loff_t *pos)
2439 {
2440         (*pos)++;
2441         return __g_next(m, pos);
2442 }
2443
2444 static void *g_start(struct seq_file *m, loff_t *pos)
2445 {
2446         mutex_lock(&graph_lock);
2447
2448         /* Nothing, tell g_show to print all functions are enabled */
2449         if (!ftrace_graph_filter_enabled && !*pos)
2450                 return (void *)1;
2451
2452         return __g_next(m, pos);
2453 }
2454
/* seq_file ->stop: release the lock taken in g_start(). */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
2459
/*
 * seq_file ->show: print one filtered function, or the "all functions"
 * banner when g_start() handed us the (void *)1 sentinel.
 */
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (ptr) {
		if (ptr == (unsigned long *)1)
			seq_printf(m, "#### all functions enabled ####\n");
		else
			seq_printf(m, "%ps\n", (void *)*ptr);
	}

	return 0;
}
2476
/* seq_file iterator over the graph filter (set_graph_function). */
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
2483
2484 static int
2485 ftrace_graph_open(struct inode *inode, struct file *file)
2486 {
2487         int ret = 0;
2488
2489         if (unlikely(ftrace_disabled))
2490                 return -ENODEV;
2491
2492         mutex_lock(&graph_lock);
2493         if ((file->f_mode & FMODE_WRITE) &&
2494             (file->f_flags & O_TRUNC)) {
2495                 ftrace_graph_filter_enabled = 0;
2496                 ftrace_graph_count = 0;
2497                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2498         }
2499         mutex_unlock(&graph_lock);
2500
2501         if (file->f_mode & FMODE_READ)
2502                 ret = seq_open(file, &ftrace_graph_seq_ops);
2503
2504         return ret;
2505 }
2506
/* Release handler: undo the seq_open() done for read-capable opens. */
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}
2514
/*
 * Apply one graph-filter expression to @array (of *idx entries).
 * A plain glob adds every matching function's ip; a '!'-negated glob
 * removes matching entries instead.  Returns 0 on success, -EBUSY
 * when the table is already full, -EINVAL when nothing matched.
 */
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				/* Add: record success even if already present. */
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
						goto out;
				}
			} else {
				/* Remove: swap in the last entry at slot i. */
				if (exists) {
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	ftrace_graph_filter_enabled = 1;
	return 0;
}
2578
/*
 * Write handler for set_graph_function.  Parses the user buffer into
 * one expression and applies it to the graph filter.  Returns bytes
 * consumed, or a negative errno from parsing/matching.
 */
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		parser.buffer[parser.idx] = 0;

		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
					parser.buffer);
		if (ret)
			goto out_free;
	}

	ret = read;

out_free:
	trace_parser_put(&parser);
out_unlock:
	mutex_unlock(&graph_lock);

	return ret;
}
2617
/* File ops for set_graph_function. */
static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.release	= ftrace_graph_release,
};
2624 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2625
2626 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2627 {
2628
2629         trace_create_file("available_filter_functions", 0444,
2630                         d_tracer, NULL, &ftrace_avail_fops);
2631
2632         trace_create_file("failures", 0444,
2633                         d_tracer, NULL, &ftrace_failures_fops);
2634
2635         trace_create_file("set_ftrace_filter", 0644, d_tracer,
2636                         NULL, &ftrace_filter_fops);
2637
2638         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2639                                     NULL, &ftrace_notrace_fops);
2640
2641 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2642         trace_create_file("set_graph_function", 0444, d_tracer,
2643                                     NULL,
2644                                     &ftrace_graph_fops);
2645 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2646
2647         return 0;
2648 }
2649
/*
 * Record every mcount call site in [start, end) and convert them to
 * nops.  @mod is NULL for the core kernel, or the module whose
 * call sites these are.  Always returns 0.
 */
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}
2681
2682 #ifdef CONFIG_MODULES
/*
 * Free every ftrace record whose call site lives in @mod's core
 * text, called when the module is unloaded.
 */
void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (ftrace_disabled)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (within_module_core(rec->ip, mod)) {
			/*
			 * rec->ip is changed in ftrace_free_rec()
			 * It should not between s and e if record was freed.
			 */
			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
			ftrace_free_rec(rec);
		}
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
2704
2705 static void ftrace_init_module(struct module *mod,
2706                                unsigned long *start, unsigned long *end)
2707 {
2708         if (ftrace_disabled || start == end)
2709                 return;
2710         ftrace_process_locs(mod, start, end);
2711 }
2712
2713 static int ftrace_module_notify(struct notifier_block *self,
2714                                 unsigned long val, void *data)
2715 {
2716         struct module *mod = data;
2717
2718         switch (val) {
2719         case MODULE_STATE_COMING:
2720                 ftrace_init_module(mod, mod->ftrace_callsites,
2721                                    mod->ftrace_callsites +
2722                                    mod->num_ftrace_callsites);
2723                 break;
2724         case MODULE_STATE_GOING:
2725                 ftrace_release_mod(mod);
2726                 break;
2727         }
2728
2729         return 0;
2730 }
2731 #else
/* !CONFIG_MODULES stub: nothing to notify about. */
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
2737 #endif /* CONFIG_MODULES */
2738
/* Notifier block registered in ftrace_init() for module load/unload. */
struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
	.priority = 0,
};
2743
2744 extern unsigned long __start_mcount_loc[];
2745 extern unsigned long __stop_mcount_loc[];
2746
/*
 * Boot-time initialization of dynamic ftrace: run the arch setup,
 * allocate the record table, convert all built-in mcount call sites,
 * hook module load/unload, and apply any boot-time filters.  On any
 * failure ftrace is disabled entirely.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/* NOTE(review): this ret is never checked before being overwritten
	 * below; ftrace_process_locs() currently always returns 0. */
	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
2785
2786 #else
2787
/* !CONFIG_DYNAMIC_FTRACE: nothing to patch, just turn tracing on. */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);
2794
/* Stubs used when dynamic ftrace is compiled out. */
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
2802 #endif /* CONFIG_DYNAMIC_FTRACE */
2803
2804 static void clear_ftrace_swapper(void)
2805 {
2806         struct task_struct *p;
2807         int cpu;
2808
2809         get_online_cpus();
2810         for_each_online_cpu(cpu) {
2811                 p = idle_task(cpu);
2812                 clear_tsk_trace_trace(p);
2813         }
2814         put_online_cpus();
2815 }
2816
2817 static void set_ftrace_swapper(void)
2818 {
2819         struct task_struct *p;
2820         int cpu;
2821
2822         get_online_cpus();
2823         for_each_online_cpu(cpu) {
2824                 p = idle_task(cpu);
2825                 set_tsk_trace_trace(p);
2826         }
2827         put_online_cpus();
2828 }
2829
/*
 * Clear the trace flag on every task belonging to @pid, then drop
 * the reference the pid filter held on it.
 */
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}
2842
/* Set the trace flag on every task belonging to @pid. */
static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}
2853
2854 static void clear_ftrace_pid_task(struct pid *pid)
2855 {
2856         if (pid == ftrace_swapper_pid)
2857                 clear_ftrace_swapper();
2858         else
2859                 clear_ftrace_pid(pid);
2860 }
2861
2862 static void set_ftrace_pid_task(struct pid *pid)
2863 {
2864         if (pid == ftrace_swapper_pid)
2865                 set_ftrace_swapper();
2866         else
2867                 set_ftrace_pid(pid);
2868 }
2869
/*
 * Add pid number @p to the set_ftrace_pid filter.  0 selects the
 * swapper/idle tasks.  Returns 0 on success (including when the pid
 * is already in the filter), -EINVAL for an unknown pid, -ENOMEM on
 * allocation failure.
 */
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);

	if (!pid)
		goto out;

	ret = 0;

	/* Already filtered?  Drop the extra reference and succeed. */
	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	/* The swapper sentinel is not a counted pid reference. */
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
2917
2918 static void ftrace_pid_reset(void)
2919 {
2920         struct ftrace_pid *fpid, *safe;
2921
2922         mutex_lock(&ftrace_lock);
2923         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2924                 struct pid *pid = fpid->pid;
2925
2926                 clear_ftrace_pid_task(pid);
2927
2928                 list_del(&fpid->list);
2929                 kfree(fpid);
2930         }
2931
2932         ftrace_update_pid_func();
2933         ftrace_startup_enable(0);
2934
2935         mutex_unlock(&ftrace_lock);
2936 }
2937
2938 static void *fpid_start(struct seq_file *m, loff_t *pos)
2939 {
2940         mutex_lock(&ftrace_lock);
2941
2942         if (list_empty(&ftrace_pids) && (!*pos))
2943                 return (void *) 1;
2944
2945         return seq_list_start(&ftrace_pids, *pos);
2946 }
2947
2948 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2949 {
2950         if (v == (void *)1)
2951                 return NULL;
2952
2953         return seq_list_next(v, &ftrace_pids, pos);
2954 }
2955
/* seq_file ->stop: release the lock taken in fpid_start(). */
static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
2960
2961 static int fpid_show(struct seq_file *m, void *v)
2962 {
2963         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
2964
2965         if (v == (void *)1) {
2966                 seq_printf(m, "no pid\n");
2967                 return 0;
2968         }
2969
2970         if (fpid->pid == ftrace_swapper_pid)
2971                 seq_printf(m, "swapper tasks\n");
2972         else
2973                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
2974
2975         return 0;
2976 }
2977
/* seq_file iterator over the set_ftrace_pid filter list. */
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
2984
2985 static int
2986 ftrace_pid_open(struct inode *inode, struct file *file)
2987 {
2988         int ret = 0;
2989
2990         if ((file->f_mode & FMODE_WRITE) &&
2991             (file->f_flags & O_TRUNC))
2992                 ftrace_pid_reset();
2993
2994         if (file->f_mode & FMODE_READ)
2995                 ret = seq_open(file, &ftrace_pid_sops);
2996
2997         return ret;
2998 }
2999
3000 static ssize_t
3001 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3002                    size_t cnt, loff_t *ppos)
3003 {
3004         char buf[64], *tmp;
3005         long val;
3006         int ret;
3007
3008         if (cnt >= sizeof(buf))
3009                 return -EINVAL;
3010
3011         if (copy_from_user(&buf, ubuf, cnt))
3012                 return -EFAULT;
3013
3014         buf[cnt] = 0;
3015
3016         /*
3017          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3018          * to clean the filter quietly.
3019          */
3020         tmp = strstrip(buf);
3021         if (strlen(tmp) == 0)
3022                 return 1;
3023
3024         ret = strict_strtol(tmp, 10, &val);
3025         if (ret < 0)
3026                 return ret;
3027
3028         ret = ftrace_pid_add(val);
3029
3030         return ret ? ret : cnt;
3031 }
3032
/* Release set_ftrace_pid: only readable opens attached a seq_file. */
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}
3041
/* File operations for the debugfs "set_ftrace_pid" control file. */
static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ftrace_pid_release,
};
3049
3050 static __init int ftrace_init_debugfs(void)
3051 {
3052         struct dentry *d_tracer;
3053
3054         d_tracer = tracing_init_dentry();
3055         if (!d_tracer)
3056                 return 0;
3057
3058         ftrace_init_dyn_debugfs(d_tracer);
3059
3060         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3061                             NULL, &ftrace_pid_fops);
3062
3063         ftrace_profile_debugfs(d_tracer);
3064
3065         return 0;
3066 }
3067 fs_initcall(ftrace_init_debugfs);
3068
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it takes no locks, so it is usable
 * from atomic context.
 *
 * NOTE(review): the original text said "from a non-atomic section,
 * use ftrace_kill" — self-referential; presumably the graceful path
 * meant is unregister_ftrace_function() — confirm.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
3082
3083 /**
3084  * register_ftrace_function - register a function for profiling
3085  * @ops - ops structure that holds the function for profiling.
3086  *
3087  * Register a function to be called by all functions in the
3088  * kernel.
3089  *
3090  * Note: @ops->func and all the functions it calls must be labeled
3091  *       with "notrace", otherwise it will go into a
3092  *       recursive loop.
3093  */
3094 int register_ftrace_function(struct ftrace_ops *ops)
3095 {
3096         int ret;
3097
3098         if (unlikely(ftrace_disabled))
3099                 return -1;
3100
3101         mutex_lock(&ftrace_lock);
3102
3103         ret = __register_ftrace_function(ops);
3104         ftrace_startup(0);
3105
3106         mutex_unlock(&ftrace_lock);
3107         return ret;
3108 }
3109
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	/* Tear the call sites back down now that @ops is removed. */
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
3127
/*
 * sysctl handler for "ftrace_enabled": flips function tracing on or off
 * and repoints ftrace_trace_function to match the registered callbacks.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret  = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on read, on error, or if the value did not change. */
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* A single callback can be called directly ... */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				/* ... multiple callbacks go through the list walker. */
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
3170
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Nonzero while a graph tracer is registered (only one allowed at a time). */
static int ftrace_graph_active;
/* Pauses graph tracing across hibernation; see ftrace_suspend_notifier_call. */
static struct notifier_block ftrace_suspend_notifier;
3175
/* Default entry callback installed when no graph tracer is registered. */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3185
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	/* [start, end) tracks the unassigned stacks still owned by us. */
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/* Pre-allocate the whole batch outside of tasklist_lock. */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			/* Free only entries [0, i) that were allocated. */
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		/* Batch used up; caller must retry for the remaining tasks. */
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			/* Ownership of this stack passes to the task here. */
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	/* Release the stacks that were never handed to a task. */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
3231
/*
 * sched_switch probe: credits the time @next spent switched out to its
 * pending return-stack entries, so sleep time is excluded from the
 * reported function durations (unless TRACE_ITER_SLEEP_TIME is set).
 */
static void
ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
				struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	/* Stamp the task being switched out; used when it is switched back in. */
	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	/* Push every open call's start time forward by the slept interval. */
	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
3263
3264 /* Allocate a return stack for each task */
3265 static int start_graph_tracing(void)
3266 {
3267         struct ftrace_ret_stack **ret_stack_list;
3268         int ret, cpu;
3269
3270         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3271                                 sizeof(struct ftrace_ret_stack *),
3272                                 GFP_KERNEL);
3273
3274         if (!ret_stack_list)
3275                 return -ENOMEM;
3276
3277         /* The cpu_boot init_task->ret_stack will never be freed */
3278         for_each_online_cpu(cpu) {
3279                 if (!idle_task(cpu)->ret_stack)
3280                         ftrace_graph_init_task(idle_task(cpu));
3281         }
3282
3283         do {
3284                 ret = alloc_retstack_tasklist(ret_stack_list);
3285         } while (ret == -EAGAIN);
3286
3287         if (!ret) {
3288                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3289                 if (ret)
3290                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3291                                 " probe to kernel_sched_switch\n");
3292         }
3293
3294         kfree(ret_stack_list);
3295         return ret;
3296 }
3297
3298 /*
3299  * Hibernation protection.
3300  * The state of the current task is too much unstable during
3301  * suspend/restore to disk. We want to protect against that.
3302  */
3303 static int
3304 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3305                                                         void *unused)
3306 {
3307         switch (state) {
3308         case PM_HIBERNATION_PREPARE:
3309                 pause_graph_tracing();
3310                 break;
3311
3312         case PM_POST_HIBERNATION:
3313                 unpause_graph_tracing();
3314                 break;
3315         }
3316         return NOTIFY_DONE;
3317 }
3318
3319 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3320                         trace_func_graph_ent_t entryfunc)
3321 {
3322         int ret = 0;
3323
3324         mutex_lock(&ftrace_lock);
3325
3326         /* we currently allow only one tracer registered at a time */
3327         if (ftrace_graph_active) {
3328                 ret = -EBUSY;
3329                 goto out;
3330         }
3331
3332         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3333         register_pm_notifier(&ftrace_suspend_notifier);
3334
3335         ftrace_graph_active++;
3336         ret = start_graph_tracing();
3337         if (ret) {
3338                 ftrace_graph_active--;
3339                 goto out;
3340         }
3341
3342         ftrace_graph_return = retfunc;
3343         ftrace_graph_entry = entryfunc;
3344
3345         ftrace_startup(FTRACE_START_FUNC_RET);
3346
3347 out:
3348         mutex_unlock(&ftrace_lock);
3349         return ret;
3350 }
3351
/* Tear down graph tracing; inverse of register_ftrace_graph(). */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
	/* Point the hooks back at the stubs before shutting the hooks down. */
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

 out:
	mutex_unlock(&ftrace_lock);
}
3369
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		/* On failure, ret_stack stays NULL and the task is skipped. */
		if (!ret_stack)
			return;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
		t->ftrace_timestamp = 0;
		/* make curr_ret_stack visable before we add the ret_stack */
		smp_wmb();
		t->ret_stack = ret_stack;
	}
}
3393
/* Free a task's return stack when the task exits. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
3404
/* Hard-stop all function tracing (thin wrapper around ftrace_stop()). */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
3409 #endif