[linux-2.6.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/ftrace.h>
36 #include <asm/setup.h>
37
38 #include "trace_output.h"
39 #include "trace_stat.h"
40
41 #define FTRACE_WARN_ON(cond)                    \
42         ({                                      \
43                 int ___r = cond;                \
44                 if (WARN_ON(___r))              \
45                         ftrace_kill();          \
46                 ___r;                           \
47         })
48
49 #define FTRACE_WARN_ON_ONCE(cond)               \
50         ({                                      \
51                 int ___r = cond;                \
52                 if (WARN_ON_ONCE(___r))         \
53                         ftrace_kill();          \
54                 ___r;                           \
55         })
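/*
 * Both macros are GNU statement expressions: they evaluate to the tested
 * condition, so a caller can shut ftrace down on an anomaly and still
 * branch on the result in one step, e.g.:
 *
 *	if (FTRACE_WARN_ON(ops == &global_ops))
 *		return -EINVAL;
 */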
56
57 /* hash bits for specific function selection */
58 #define FTRACE_HASH_BITS 7
59 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60 #define FTRACE_HASH_DEFAULT_BITS 10
61 #define FTRACE_HASH_MAX_BITS 12
62
63 /* ftrace_enabled is a method to turn ftrace on or off */
64 int ftrace_enabled __read_mostly;
65 static int last_ftrace_enabled;
66
67 /* Quick disabling of function tracer. */
68 int function_trace_stop;
69
70 /* List for set_ftrace_pid's pids. */
71 LIST_HEAD(ftrace_pids);
72 struct ftrace_pid {
73         struct list_head list;
74         struct pid *pid;
75 };
76
77 /*
78  * ftrace_disabled is set when an anomaly is discovered.
79  * ftrace_disabled is much stronger than ftrace_enabled.
80  */
81 static int ftrace_disabled __read_mostly;
82
83 static DEFINE_MUTEX(ftrace_lock);
84
85 static struct ftrace_ops ftrace_list_end __read_mostly =
86 {
87         .func           = ftrace_stub,
88 };
89
90 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
91 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
92 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
93 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
94 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
95 static struct ftrace_ops global_ops;
96
97 static void
98 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
99
100 /*
101  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
102  * can use rcu_dereference_raw() is that elements removed from this list
103  * are simply leaked, so there is no need to interact with a grace-period
104  * mechanism.  The rcu_dereference_raw() calls are needed to handle
105  * concurrent insertions into the ftrace_global_list.
106  *
107  * Silly Alpha and silly pointer-speculation compiler optimizations!
108  */
109 static void ftrace_global_list_func(unsigned long ip,
110                                     unsigned long parent_ip)
111 {
112         struct ftrace_ops *op = rcu_dereference_raw(ftrace_global_list); /*see above*/
113
114         while (op != &ftrace_list_end) {
115                 op->func(ip, parent_ip);
116                 op = rcu_dereference_raw(op->next); /*see above*/
117         }
118 }
119
120 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
121 {
122         if (!test_tsk_trace_trace(current))
123                 return;
124
125         ftrace_pid_function(ip, parent_ip);
126 }
127
128 static void set_ftrace_pid_function(ftrace_func_t func)
129 {
130         /* do not set ftrace_pid_function to itself! */
131         if (func != ftrace_pid_func)
132                 ftrace_pid_function = func;
133 }
134
135 /**
136  * clear_ftrace_function - reset the ftrace function
137  *
138  * This resets the ftrace functions to the stub and in essence stops
139  * tracing. There may be a brief lag before all callers see the update.
140  */
141 void clear_ftrace_function(void)
142 {
143         ftrace_trace_function = ftrace_stub;
144         __ftrace_trace_function = ftrace_stub;
145         ftrace_pid_function = ftrace_stub;
146 }
147
148 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
149 /*
150  * For those archs that do not test function_trace_stop in their
151  * mcount call site, we need to do it from C.
152  */
153 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
154 {
155         if (function_trace_stop)
156                 return;
157
158         __ftrace_trace_function(ip, parent_ip);
159 }
160 #endif
161
162 static void update_global_ops(void)
163 {
164         ftrace_func_t func;
165
166         /*
167          * If there's only one function registered, then call that
168          * function directly. Otherwise, we need to iterate over the
169          * registered callers.
170          */
171         if (ftrace_global_list == &ftrace_list_end ||
172             ftrace_global_list->next == &ftrace_list_end)
173                 func = ftrace_global_list->func;
174         else
175                 func = ftrace_global_list_func;
176
177         /* If we filter on pids, update to use the pid function */
178         if (!list_empty(&ftrace_pids)) {
179                 set_ftrace_pid_function(func);
180                 func = ftrace_pid_func;
181         }
182
183         global_ops.func = func;
184 }
185
186 static void update_ftrace_function(void)
187 {
188         ftrace_func_t func;
189
190         update_global_ops();
191
192         /*
193          * If the list is empty, or there is only one ops registered
194          * and it is not dynamic, then have the mcount trampoline call
195          * that function directly.
196          */
197         if (ftrace_ops_list == &ftrace_list_end ||
198             (ftrace_ops_list->next == &ftrace_list_end &&
199              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
200                 func = ftrace_ops_list->func;
201         else
202                 func = ftrace_ops_list_func;
203
204 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
205         ftrace_trace_function = func;
206 #else
207         __ftrace_trace_function = func;
208         ftrace_trace_function = ftrace_test_stop_func;
209 #endif
210 }
211
212 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
213 {
214         ops->next = *list;
215         /*
216          * We are entering ops into the list but another
217          * CPU might be walking that list. We need to make sure
218          * the ops->next pointer is valid before another CPU sees
219          * the ops pointer inserted into the list.
220          */
221         rcu_assign_pointer(*list, ops);
222 }
223
224 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
225 {
226         struct ftrace_ops **p;
227
228         /*
229          * If we are removing the last function, then simply point
230          * to the ftrace_stub.
231          */
232         if (*list == ops && ops->next == &ftrace_list_end) {
233                 *list = &ftrace_list_end;
234                 return 0;
235         }
236
237         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
238                 if (*p == ops)
239                         break;
240
241         if (*p != ops)
242                 return -1;
243
244         *p = (*p)->next;
245         return 0;
246 }
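/*
 * A minimal sketch of the list protocol used above: insertion fills in
 * ops->next first and only then publishes ops with rcu_assign_pointer(),
 * so a CPU walking the list concurrently sees either the old head or a
 * fully linked new entry.  Removal just unlinks the entry; a caller that
 * may free a dynamic ops waits for in-flight callers with
 * synchronize_sched() (see __unregister_ftrace_function() below).
 */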
247
248 static int __register_ftrace_function(struct ftrace_ops *ops)
249 {
250         if (ftrace_disabled)
251                 return -ENODEV;
252
253         if (FTRACE_WARN_ON(ops == &global_ops))
254                 return -EINVAL;
255
256         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
257                 return -EBUSY;
258
259         if (!core_kernel_data((unsigned long)ops))
260                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
261
262         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
263                 int first = ftrace_global_list == &ftrace_list_end;
264                 add_ftrace_ops(&ftrace_global_list, ops);
265                 ops->flags |= FTRACE_OPS_FL_ENABLED;
266                 if (first)
267                         add_ftrace_ops(&ftrace_ops_list, &global_ops);
268         } else
269                 add_ftrace_ops(&ftrace_ops_list, ops);
270
271         if (ftrace_enabled)
272                 update_ftrace_function();
273
274         return 0;
275 }
276
277 static int __unregister_ftrace_function(struct ftrace_ops *ops)
278 {
279         int ret;
280
281         if (ftrace_disabled)
282                 return -ENODEV;
283
284         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
285                 return -EBUSY;
286
287         if (FTRACE_WARN_ON(ops == &global_ops))
288                 return -EINVAL;
289
290         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
291                 ret = remove_ftrace_ops(&ftrace_global_list, ops);
292                 if (!ret && ftrace_global_list == &ftrace_list_end)
293                         ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
294                 if (!ret)
295                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
296         } else
297                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
298
299         if (ret < 0)
300                 return ret;
301
302         if (ftrace_enabled)
303                 update_ftrace_function();
304
305         /*
306          * Dynamic ops may be freed, we must make sure that all
307          * callers are done before leaving this function.
308          */
309         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
310                 synchronize_sched();
311
312         return 0;
313 }
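/*
 * Illustrative sketch (names are examples only): callers normally reach the
 * helpers above through register_ftrace_function()/unregister_ftrace_function()
 * with an ops that only fills in the callback:
 *
 *	static void my_trace_call(unsigned long ip, unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_call,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * The function profiler below does exactly this with ftrace_profile_ops.
 */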
314
315 static void ftrace_update_pid_func(void)
316 {
317         /* Only do something if we are tracing something */
318         if (ftrace_trace_function == ftrace_stub)
319                 return;
320
321         update_ftrace_function();
322 }
323
324 #ifdef CONFIG_FUNCTION_PROFILER
325 struct ftrace_profile {
326         struct hlist_node               node;
327         unsigned long                   ip;
328         unsigned long                   counter;
329 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
330         unsigned long long              time;
331         unsigned long long              time_squared;
332 #endif
333 };
334
335 struct ftrace_profile_page {
336         struct ftrace_profile_page      *next;
337         unsigned long                   index;
338         struct ftrace_profile           records[];
339 };
340
341 struct ftrace_profile_stat {
342         atomic_t                        disabled;
343         struct hlist_head               *hash;
344         struct ftrace_profile_page      *pages;
345         struct ftrace_profile_page      *start;
346         struct tracer_stat              stat;
347 };
348
349 #define PROFILE_RECORDS_SIZE                                            \
350         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
351
352 #define PROFILES_PER_PAGE                                       \
353         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
354
355 static int ftrace_profile_bits __read_mostly;
356 static int ftrace_profile_enabled __read_mostly;
357
358 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
359 static DEFINE_MUTEX(ftrace_profile_lock);
360
361 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
362
363 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
364
365 static void *
366 function_stat_next(void *v, int idx)
367 {
368         struct ftrace_profile *rec = v;
369         struct ftrace_profile_page *pg;
370
371         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
372
373  again:
374         if (idx != 0)
375                 rec++;
376
377         if ((void *)rec >= (void *)&pg->records[pg->index]) {
378                 pg = pg->next;
379                 if (!pg)
380                         return NULL;
381                 rec = &pg->records[0];
382                 if (!rec->counter)
383                         goto again;
384         }
385
386         return rec;
387 }
388
389 static void *function_stat_start(struct tracer_stat *trace)
390 {
391         struct ftrace_profile_stat *stat =
392                 container_of(trace, struct ftrace_profile_stat, stat);
393
394         if (!stat || !stat->start)
395                 return NULL;
396
397         return function_stat_next(&stat->start->records[0], 0);
398 }
399
400 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
401 /* function graph compares on total time */
402 static int function_stat_cmp(void *p1, void *p2)
403 {
404         struct ftrace_profile *a = p1;
405         struct ftrace_profile *b = p2;
406
407         if (a->time < b->time)
408                 return -1;
409         if (a->time > b->time)
410                 return 1;
411         else
412                 return 0;
413 }
414 #else
415 /* without the function graph tracer, compare on hit counts */
416 static int function_stat_cmp(void *p1, void *p2)
417 {
418         struct ftrace_profile *a = p1;
419         struct ftrace_profile *b = p2;
420
421         if (a->counter < b->counter)
422                 return -1;
423         if (a->counter > b->counter)
424                 return 1;
425         else
426                 return 0;
427 }
428 #endif
429
430 static int function_stat_headers(struct seq_file *m)
431 {
432 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
433         seq_printf(m, "  Function                               "
434                    "Hit    Time            Avg             s^2\n"
435                       "  --------                               "
436                    "---    ----            ---             ---\n");
437 #else
438         seq_printf(m, "  Function                               Hit\n"
439                       "  --------                               ---\n");
440 #endif
441         return 0;
442 }
443
444 static int function_stat_show(struct seq_file *m, void *v)
445 {
446         struct ftrace_profile *rec = v;
447         char str[KSYM_SYMBOL_LEN];
448         int ret = 0;
449 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
450         static struct trace_seq s;
451         unsigned long long avg;
452         unsigned long long stddev;
453 #endif
454         mutex_lock(&ftrace_profile_lock);
455
456         /* we raced with function_profile_reset() */
457         if (unlikely(rec->counter == 0)) {
458                 ret = -EBUSY;
459                 goto out;
460         }
461
462         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
463         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
464
465 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
466         seq_printf(m, "    ");
467         avg = rec->time;
468         do_div(avg, rec->counter);
469
470         /* Sample variance (s^2) */
471         if (rec->counter <= 1)
472                 stddev = 0;
473         else {
474                 stddev = rec->time_squared - rec->counter * avg * avg;
475                 /*
476                  * Divide by only 1000 here for the ns^2 -> us^2 conversion;
477                  * trace_print_graph_duration() will divide by 1000 again.
478                  */
479                 do_div(stddev, (rec->counter - 1) * 1000);
480         }
481
482         trace_seq_init(&s);
483         trace_print_graph_duration(rec->time, &s);
484         trace_seq_puts(&s, "    ");
485         trace_print_graph_duration(avg, &s);
486         trace_seq_puts(&s, "    ");
487         trace_print_graph_duration(stddev, &s);
488         trace_print_seq(m, &s);
489 #endif
490         seq_putc(m, '\n');
491 out:
492         mutex_unlock(&ftrace_profile_lock);
493
494         return ret;
495 }
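/*
 * Worked example of the math above (values are illustrative): two hits of
 * 1000ns and 3000ns give counter = 2, time = 4000 and time_squared =
 * 10,000,000.  Then avg = 2000 and the raw sum of squared deviations is
 * 10,000,000 - 2 * 2000 * 2000 = 2,000,000 ns^2.  Dividing by
 * (counter - 1) * 1000 here and by 1000 again in trace_print_graph_duration()
 * prints 2.000 in the s^2 column, i.e. 2 us^2.
 */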
496
497 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
498 {
499         struct ftrace_profile_page *pg;
500
501         pg = stat->pages = stat->start;
502
503         while (pg) {
504                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
505                 pg->index = 0;
506                 pg = pg->next;
507         }
508
509         memset(stat->hash, 0,
510                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
511 }
512
513 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
514 {
515         struct ftrace_profile_page *pg;
516         int functions;
517         int pages;
518         int i;
519
520         /* If we already allocated, do nothing */
521         if (stat->pages)
522                 return 0;
523
524         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
525         if (!stat->pages)
526                 return -ENOMEM;
527
528 #ifdef CONFIG_DYNAMIC_FTRACE
529         functions = ftrace_update_tot_cnt;
530 #else
531         /*
532          * We do not know the number of functions that exist because
533          * dynamic tracing is what counts them. From past experience,
534          * there are around 20K functions. That should be more than enough.
535          * It is highly unlikely we will execute every function in
536          * the kernel.
537          */
538         functions = 20000;
539 #endif
540
541         pg = stat->start = stat->pages;
542
543         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
544
545         for (i = 0; i < pages; i++) {
546                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
547                 if (!pg->next)
548                         goto out_free;
549                 pg = pg->next;
550         }
551
552         return 0;
553
554  out_free:
555         pg = stat->start;
556         while (pg) {
557                 unsigned long tmp = (unsigned long)pg;
558
559                 pg = pg->next;
560                 free_page(tmp);
561         }
562
563         free_page((unsigned long)stat->pages);
564         stat->pages = NULL;
565         stat->start = NULL;
566
567         return -ENOMEM;
568 }
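/*
 * Rough sizing, assuming a 64-bit build with 4K pages and the graph
 * profiler enabled: struct ftrace_profile is 48 bytes, so PROFILES_PER_PAGE
 * is about 85, and the 20000-function estimate above needs roughly 236
 * extra pages, i.e. a little under 1MB of profile records per CPU.
 */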
569
570 static int ftrace_profile_init_cpu(int cpu)
571 {
572         struct ftrace_profile_stat *stat;
573         int size;
574
575         stat = &per_cpu(ftrace_profile_stats, cpu);
576
577         if (stat->hash) {
578                 /* If the profile is already created, simply reset it */
579                 ftrace_profile_reset(stat);
580                 return 0;
581         }
582
583         /*
584          * We are profiling all functions, but usually only a few thousand
585          * functions are hit. We'll make a hash of 1024 items.
586          */
587         size = FTRACE_PROFILE_HASH_SIZE;
588
589         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
590
591         if (!stat->hash)
592                 return -ENOMEM;
593
594         if (!ftrace_profile_bits) {
595                 size--;
596
597                 for (; size; size >>= 1)
598                         ftrace_profile_bits++;
599         }
600
601         /* Preallocate the function profiling pages */
602         if (ftrace_profile_pages_init(stat) < 0) {
603                 kfree(stat->hash);
604                 stat->hash = NULL;
605                 return -ENOMEM;
606         }
607
608         return 0;
609 }
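/*
 * For example, with FTRACE_PROFILE_HASH_SIZE = 1024 the loop above counts
 * the bits of 1023 and leaves ftrace_profile_bits = 10, which is what
 * hash_long() needs to fold an instruction pointer into the 1024 buckets.
 */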
610
611 static int ftrace_profile_init(void)
612 {
613         int cpu;
614         int ret = 0;
615
616         for_each_online_cpu(cpu) {
617                 ret = ftrace_profile_init_cpu(cpu);
618                 if (ret)
619                         break;
620         }
621
622         return ret;
623 }
624
625 /* interrupts must be disabled */
626 static struct ftrace_profile *
627 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
628 {
629         struct ftrace_profile *rec;
630         struct hlist_head *hhd;
631         struct hlist_node *n;
632         unsigned long key;
633
634         key = hash_long(ip, ftrace_profile_bits);
635         hhd = &stat->hash[key];
636
637         if (hlist_empty(hhd))
638                 return NULL;
639
640         hlist_for_each_entry_rcu(rec, n, hhd, node) {
641                 if (rec->ip == ip)
642                         return rec;
643         }
644
645         return NULL;
646 }
647
648 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
649                                struct ftrace_profile *rec)
650 {
651         unsigned long key;
652
653         key = hash_long(rec->ip, ftrace_profile_bits);
654         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
655 }
656
657 /*
658  * The memory is already allocated; this simply finds a new record to use.
659  */
660 static struct ftrace_profile *
661 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
662 {
663         struct ftrace_profile *rec = NULL;
664
665         /* prevent recursion (from NMIs) */
666         if (atomic_inc_return(&stat->disabled) != 1)
667                 goto out;
668
669         /*
670          * Try to find the function again since an NMI
671          * could have added it
672          */
673         rec = ftrace_find_profiled_func(stat, ip);
674         if (rec)
675                 goto out;
676
677         if (stat->pages->index == PROFILES_PER_PAGE) {
678                 if (!stat->pages->next)
679                         goto out;
680                 stat->pages = stat->pages->next;
681         }
682
683         rec = &stat->pages->records[stat->pages->index++];
684         rec->ip = ip;
685         ftrace_add_profile(stat, rec);
686
687  out:
688         atomic_dec(&stat->disabled);
689
690         return rec;
691 }
692
693 static void
694 function_profile_call(unsigned long ip, unsigned long parent_ip)
695 {
696         struct ftrace_profile_stat *stat;
697         struct ftrace_profile *rec;
698         unsigned long flags;
699
700         if (!ftrace_profile_enabled)
701                 return;
702
703         local_irq_save(flags);
704
705         stat = &__get_cpu_var(ftrace_profile_stats);
706         if (!stat->hash || !ftrace_profile_enabled)
707                 goto out;
708
709         rec = ftrace_find_profiled_func(stat, ip);
710         if (!rec) {
711                 rec = ftrace_profile_alloc(stat, ip);
712                 if (!rec)
713                         goto out;
714         }
715
716         rec->counter++;
717  out:
718         local_irq_restore(flags);
719 }
720
721 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
722 static int profile_graph_entry(struct ftrace_graph_ent *trace)
723 {
724         function_profile_call(trace->func, 0);
725         return 1;
726 }
727
728 static void profile_graph_return(struct ftrace_graph_ret *trace)
729 {
730         struct ftrace_profile_stat *stat;
731         unsigned long long calltime;
732         struct ftrace_profile *rec;
733         unsigned long flags;
734
735         local_irq_save(flags);
736         stat = &__get_cpu_var(ftrace_profile_stats);
737         if (!stat->hash || !ftrace_profile_enabled)
738                 goto out;
739
740         /* If the calltime was zeroed, ignore it */
741         if (!trace->calltime)
742                 goto out;
743
744         calltime = trace->rettime - trace->calltime;
745
746         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
747                 int index;
748
749                 index = trace->depth;
750
751                 /* Append this call time to the parent time to subtract */
752                 if (index)
753                         current->ret_stack[index - 1].subtime += calltime;
754
755                 if (current->ret_stack[index].subtime < calltime)
756                         calltime -= current->ret_stack[index].subtime;
757                 else
758                         calltime = 0;
759         }
760
761         rec = ftrace_find_profiled_func(stat, trace->func);
762         if (rec) {
763                 rec->time += calltime;
764                 rec->time_squared += calltime * calltime;
765         }
766
767  out:
768         local_irq_restore(flags);
769 }
770
771 static int register_ftrace_profiler(void)
772 {
773         return register_ftrace_graph(&profile_graph_return,
774                                      &profile_graph_entry);
775 }
776
777 static void unregister_ftrace_profiler(void)
778 {
779         unregister_ftrace_graph();
780 }
781 #else
782 static struct ftrace_ops ftrace_profile_ops __read_mostly =
783 {
784         .func           = function_profile_call,
785 };
786
787 static int register_ftrace_profiler(void)
788 {
789         return register_ftrace_function(&ftrace_profile_ops);
790 }
791
792 static void unregister_ftrace_profiler(void)
793 {
794         unregister_ftrace_function(&ftrace_profile_ops);
795 }
796 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
797
798 static ssize_t
799 ftrace_profile_write(struct file *filp, const char __user *ubuf,
800                      size_t cnt, loff_t *ppos)
801 {
802         unsigned long val;
803         char buf[64];           /* big enough to hold a number */
804         int ret;
805
806         if (cnt >= sizeof(buf))
807                 return -EINVAL;
808
809         if (copy_from_user(&buf, ubuf, cnt))
810                 return -EFAULT;
811
812         buf[cnt] = 0;
813
814         ret = strict_strtoul(buf, 10, &val);
815         if (ret < 0)
816                 return ret;
817
818         val = !!val;
819
820         mutex_lock(&ftrace_profile_lock);
821         if (ftrace_profile_enabled ^ val) {
822                 if (val) {
823                         ret = ftrace_profile_init();
824                         if (ret < 0) {
825                                 cnt = ret;
826                                 goto out;
827                         }
828
829                         ret = register_ftrace_profiler();
830                         if (ret < 0) {
831                                 cnt = ret;
832                                 goto out;
833                         }
834                         ftrace_profile_enabled = 1;
835                 } else {
836                         ftrace_profile_enabled = 0;
837                         /*
838                          * unregister_ftrace_profiler() calls stop_machine(),
839                          * so this acts like a synchronize_sched().
840                          */
841                         unregister_ftrace_profiler();
842                 }
843         }
844  out:
845         mutex_unlock(&ftrace_profile_lock);
846
847         *ppos += cnt;
848
849         return cnt;
850 }
851
852 static ssize_t
853 ftrace_profile_read(struct file *filp, char __user *ubuf,
854                      size_t cnt, loff_t *ppos)
855 {
856         char buf[64];           /* big enough to hold a number */
857         int r;
858
859         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
860         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
861 }
862
863 static const struct file_operations ftrace_profile_fops = {
864         .open           = tracing_open_generic,
865         .read           = ftrace_profile_read,
866         .write          = ftrace_profile_write,
867         .llseek         = default_llseek,
868 };
869
870 /* used to initialize the real stat files */
871 static struct tracer_stat function_stats __initdata = {
872         .name           = "functions",
873         .stat_start     = function_stat_start,
874         .stat_next      = function_stat_next,
875         .stat_cmp       = function_stat_cmp,
876         .stat_headers   = function_stat_headers,
877         .stat_show      = function_stat_show
878 };
879
880 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
881 {
882         struct ftrace_profile_stat *stat;
883         struct dentry *entry;
884         char *name;
885         int ret;
886         int cpu;
887
888         for_each_possible_cpu(cpu) {
889                 stat = &per_cpu(ftrace_profile_stats, cpu);
890
891                 /* allocate enough for function name + cpu number */
892                 name = kmalloc(32, GFP_KERNEL);
893                 if (!name) {
894                         /*
895                          * The files created are permanent; even if something
896                          * goes wrong later we do not free this memory.
897                          */
898                         WARN(1,
899                              "Could not allocate stat file for cpu %d\n",
900                              cpu);
901                         return;
902                 }
903                 stat->stat = function_stats;
904                 snprintf(name, 32, "function%d", cpu);
905                 stat->stat.name = name;
906                 ret = register_stat_tracer(&stat->stat);
907                 if (ret) {
908                         WARN(1,
909                              "Could not register function stat for cpu %d\n",
910                              cpu);
911                         kfree(name);
912                         return;
913                 }
914         }
915
916         entry = debugfs_create_file("function_profile_enabled", 0644,
917                                     d_tracer, NULL, &ftrace_profile_fops);
918         if (!entry)
919                 pr_warning("Could not create debugfs "
920                            "'function_profile_enabled' entry\n");
921 }
922
923 #else /* CONFIG_FUNCTION_PROFILER */
924 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
925 {
926 }
927 #endif /* CONFIG_FUNCTION_PROFILER */
928
929 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
930
931 #ifdef CONFIG_DYNAMIC_FTRACE
932
933 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
934 # error Dynamic ftrace depends on MCOUNT_RECORD
935 #endif
936
937 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
938
939 struct ftrace_func_probe {
940         struct hlist_node       node;
941         struct ftrace_probe_ops *ops;
942         unsigned long           flags;
943         unsigned long           ip;
944         void                    *data;
945         struct rcu_head         rcu;
946 };
947
948 enum {
949         FTRACE_ENABLE_CALLS             = (1 << 0),
950         FTRACE_DISABLE_CALLS            = (1 << 1),
951         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
952         FTRACE_START_FUNC_RET           = (1 << 3),
953         FTRACE_STOP_FUNC_RET            = (1 << 4),
954 };
955 struct ftrace_func_entry {
956         struct hlist_node hlist;
957         unsigned long ip;
958 };
959
960 struct ftrace_hash {
961         unsigned long           size_bits;
962         struct hlist_head       *buckets;
963         unsigned long           count;
964         struct rcu_head         rcu;
965 };
966
967 /*
968  * We make these constant because no one should touch them;
969  * they are used as the default "empty hash" to avoid allocating
970  * it all the time. They live in a read-only section, so any
971  * attempt to modify them will cause an exception.
972  */
973 static const struct hlist_head empty_buckets[1];
974 static const struct ftrace_hash empty_hash = {
975         .buckets = (struct hlist_head *)empty_buckets,
976 };
977 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
978
979 static struct ftrace_ops global_ops = {
980         .func                   = ftrace_stub,
981         .notrace_hash           = EMPTY_HASH,
982         .filter_hash            = EMPTY_HASH,
983 };
984
985 static struct dyn_ftrace *ftrace_new_addrs;
986
987 static DEFINE_MUTEX(ftrace_regex_lock);
988
989 struct ftrace_page {
990         struct ftrace_page      *next;
991         int                     index;
992         struct dyn_ftrace       records[];
993 };
994
995 #define ENTRIES_PER_PAGE \
996   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
997
998 /* estimate from running different kernels */
999 #define NR_TO_INIT              10000
1000
1001 static struct ftrace_page       *ftrace_pages_start;
1002 static struct ftrace_page       *ftrace_pages;
1003
1004 static struct dyn_ftrace *ftrace_free_records;
1005
1006 static struct ftrace_func_entry *
1007 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1008 {
1009         unsigned long key;
1010         struct ftrace_func_entry *entry;
1011         struct hlist_head *hhd;
1012         struct hlist_node *n;
1013
1014         if (!hash->count)
1015                 return NULL;
1016
1017         if (hash->size_bits > 0)
1018                 key = hash_long(ip, hash->size_bits);
1019         else
1020                 key = 0;
1021
1022         hhd = &hash->buckets[key];
1023
1024         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1025                 if (entry->ip == ip)
1026                         return entry;
1027         }
1028         return NULL;
1029 }
1030
1031 static void __add_hash_entry(struct ftrace_hash *hash,
1032                              struct ftrace_func_entry *entry)
1033 {
1034         struct hlist_head *hhd;
1035         unsigned long key;
1036
1037         if (hash->size_bits)
1038                 key = hash_long(entry->ip, hash->size_bits);
1039         else
1040                 key = 0;
1041
1042         hhd = &hash->buckets[key];
1043         hlist_add_head(&entry->hlist, hhd);
1044         hash->count++;
1045 }
1046
1047 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1048 {
1049         struct ftrace_func_entry *entry;
1050
1051         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1052         if (!entry)
1053                 return -ENOMEM;
1054
1055         entry->ip = ip;
1056         __add_hash_entry(hash, entry);
1057
1058         return 0;
1059 }
1060
1061 static void
1062 free_hash_entry(struct ftrace_hash *hash,
1063                   struct ftrace_func_entry *entry)
1064 {
1065         hlist_del(&entry->hlist);
1066         kfree(entry);
1067         hash->count--;
1068 }
1069
1070 static void
1071 remove_hash_entry(struct ftrace_hash *hash,
1072                   struct ftrace_func_entry *entry)
1073 {
1074         hlist_del(&entry->hlist);
1075         hash->count--;
1076 }
1077
1078 static void ftrace_hash_clear(struct ftrace_hash *hash)
1079 {
1080         struct hlist_head *hhd;
1081         struct hlist_node *tp, *tn;
1082         struct ftrace_func_entry *entry;
1083         int size = 1 << hash->size_bits;
1084         int i;
1085
1086         if (!hash->count)
1087                 return;
1088
1089         for (i = 0; i < size; i++) {
1090                 hhd = &hash->buckets[i];
1091                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1092                         free_hash_entry(hash, entry);
1093         }
1094         FTRACE_WARN_ON(hash->count);
1095 }
1096
1097 static void free_ftrace_hash(struct ftrace_hash *hash)
1098 {
1099         if (!hash || hash == EMPTY_HASH)
1100                 return;
1101         ftrace_hash_clear(hash);
1102         kfree(hash->buckets);
1103         kfree(hash);
1104 }
1105
1106 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1107 {
1108         struct ftrace_hash *hash;
1109
1110         hash = container_of(rcu, struct ftrace_hash, rcu);
1111         free_ftrace_hash(hash);
1112 }
1113
1114 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1115 {
1116         if (!hash || hash == EMPTY_HASH)
1117                 return;
1118         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1119 }
1120
1121 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1122 {
1123         struct ftrace_hash *hash;
1124         int size;
1125
1126         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1127         if (!hash)
1128                 return NULL;
1129
1130         size = 1 << size_bits;
1131         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1132
1133         if (!hash->buckets) {
1134                 kfree(hash);
1135                 return NULL;
1136         }
1137
1138         hash->size_bits = size_bits;
1139
1140         return hash;
1141 }
1142
1143 static struct ftrace_hash *
1144 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1145 {
1146         struct ftrace_func_entry *entry;
1147         struct ftrace_hash *new_hash;
1148         struct hlist_node *tp;
1149         int size;
1150         int ret;
1151         int i;
1152
1153         new_hash = alloc_ftrace_hash(size_bits);
1154         if (!new_hash)
1155                 return NULL;
1156
1157         /* Empty hash? */
1158         if (!hash || !hash->count)
1159                 return new_hash;
1160
1161         size = 1 << hash->size_bits;
1162         for (i = 0; i < size; i++) {
1163                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1164                         ret = add_hash_entry(new_hash, entry->ip);
1165                         if (ret < 0)
1166                                 goto free_hash;
1167                 }
1168         }
1169
1170         FTRACE_WARN_ON(new_hash->count != hash->count);
1171
1172         return new_hash;
1173
1174  free_hash:
1175         free_ftrace_hash(new_hash);
1176         return NULL;
1177 }
1178
1179 static int
1180 ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1181 {
1182         struct ftrace_func_entry *entry;
1183         struct hlist_node *tp, *tn;
1184         struct hlist_head *hhd;
1185         struct ftrace_hash *old_hash;
1186         struct ftrace_hash *new_hash;
1187         unsigned long key;
1188         int size = src->count;
1189         int bits = 0;
1190         int i;
1191
1192         /*
1193          * If the new source is empty, just free dst and assign it
1194          * the empty_hash.
1195          */
1196         if (!src->count) {
1197                 free_ftrace_hash_rcu(*dst);
1198                 rcu_assign_pointer(*dst, EMPTY_HASH);
1199                 return 0;
1200         }
1201
1202         /*
1203          * Make the hash size about 1/2 the # found
1204          */
1205         for (size /= 2; size; size >>= 1)
1206                 bits++;
1207
1208         /* Don't allocate too much */
1209         if (bits > FTRACE_HASH_MAX_BITS)
1210                 bits = FTRACE_HASH_MAX_BITS;
1211
1212         new_hash = alloc_ftrace_hash(bits);
1213         if (!new_hash)
1214                 return -ENOMEM;
1215
1216         size = 1 << src->size_bits;
1217         for (i = 0; i < size; i++) {
1218                 hhd = &src->buckets[i];
1219                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1220                         if (bits > 0)
1221                                 key = hash_long(entry->ip, bits);
1222                         else
1223                                 key = 0;
1224                         remove_hash_entry(src, entry);
1225                         __add_hash_entry(new_hash, entry);
1226                 }
1227         }
1228
1229         old_hash = *dst;
1230         rcu_assign_pointer(*dst, new_hash);
1231         free_ftrace_hash_rcu(old_hash);
1232
1233         return 0;
1234 }
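/*
 * Sizing example for the loop above: moving a source hash with 100 entries
 * starts from size = 50 and counts 6 bits, so the new hash gets 64 buckets,
 * roughly half the number of entries.  The FTRACE_HASH_MAX_BITS cap keeps
 * a very large filter from allocating more than 4096 buckets.
 */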
1235
1236 /*
1237  * Test the hashes for this ops to see if we want to call
1238  * the ops->func or not.
1239  *
1240  * It's a match if the ip is in the ops->filter_hash or
1241  * the filter_hash does not exist or is empty,
1242  *  AND
1243  * the ip is not in the ops->notrace_hash.
1244  *
1245  * This needs to be called with preemption disabled as
1246  * the hashes are freed with call_rcu_sched().
1247  */
1248 static int
1249 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1250 {
1251         struct ftrace_hash *filter_hash;
1252         struct ftrace_hash *notrace_hash;
1253         int ret;
1254
1255         filter_hash = rcu_dereference_raw(ops->filter_hash);
1256         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1257
1258         if ((!filter_hash || !filter_hash->count ||
1259              ftrace_lookup_ip(filter_hash, ip)) &&
1260             (!notrace_hash || !notrace_hash->count ||
1261              !ftrace_lookup_ip(notrace_hash, ip)))
1262                 ret = 1;
1263         else
1264                 ret = 0;
1265
1266         return ret;
1267 }
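/*
 * The decision above, spelled out:
 *
 *	ip in notrace_hash                          -> do not trace (0)
 *	filter_hash set but ip not in it            -> do not trace (0)
 *	filter_hash empty, ip not in notrace_hash   -> trace (1)
 *	ip in filter_hash, ip not in notrace_hash   -> trace (1)
 */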
1268
1269 /*
1270  * This is a nested (double) for loop. Do not use 'break' to break
1271  * out of it; you must use a goto.
1272  */
1273 #define do_for_each_ftrace_rec(pg, rec)                                 \
1274         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1275                 int _____i;                                             \
1276                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1277                         rec = &pg->records[_____i];
1278
1279 #define while_for_each_ftrace_rec()             \
1280                 }                               \
1281         }
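/*
 * Typical use (illustrative): an early exit must jump past the closing
 * macro with a goto, never 'break' out of the inner loop:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	...
 */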
1282
1283 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1284                                      int filter_hash,
1285                                      bool inc)
1286 {
1287         struct ftrace_hash *hash;
1288         struct ftrace_hash *other_hash;
1289         struct ftrace_page *pg;
1290         struct dyn_ftrace *rec;
1291         int count = 0;
1292         int all = 0;
1293
1294         /* Only update if the ops has been registered */
1295         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1296                 return;
1297
1298         /*
1299          * In the filter_hash case:
1300          *   If the count is zero, we update all records.
1301          *   Otherwise we just update the items in the hash.
1302          *
1303          * In the notrace_hash case:
1304          *   We enable the update in the hash.
1305          *   As disabling notrace means enabling the tracing,
1306          *   and enabling notrace means disabling, the inc variable
1307          *   gets inverted.
1308          */
1309         if (filter_hash) {
1310                 hash = ops->filter_hash;
1311                 other_hash = ops->notrace_hash;
1312                 if (!hash || !hash->count)
1313                         all = 1;
1314         } else {
1315                 inc = !inc;
1316                 hash = ops->notrace_hash;
1317                 other_hash = ops->filter_hash;
1318                 /*
1319                  * If the notrace hash has no items,
1320                  * then there's nothing to do.
1321                  */
1322                 if (hash && !hash->count)
1323                         return;
1324         }
1325
1326         do_for_each_ftrace_rec(pg, rec) {
1327                 int in_other_hash = 0;
1328                 int in_hash = 0;
1329                 int match = 0;
1330
1331                 if (all) {
1332                         /*
1333                          * Only the filter_hash affects all records.
1334                          * Update if the record is not in the notrace hash.
1335                          */
1336                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1337                                 match = 1;
1338                 } else {
1339                         in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1340                         in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
1341
1342                         /* Filter case: match records in this hash and not in
1343                          * the notrace hash.  Notrace case: match records in this
1344                          * hash that are also in the filter hash (or the filter hash is empty). */
1345                         if (filter_hash && in_hash && !in_other_hash)
1346                                 match = 1;
1347                         else if (!filter_hash && in_hash &&
1348                                  (in_other_hash || !other_hash->count))
1349                                 match = 1;
1350                 }
1351                 if (!match)
1352                         continue;
1353
1354                 if (inc) {
1355                         rec->flags++;
1356                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1357                                 return;
1358                 } else {
1359                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1360                                 return;
1361                         rec->flags--;
1362                 }
1363                 count++;
1364                 /* Shortcut, if we handled all records, we are done. */
1365                 if (!all && count == hash->count)
1366                         return;
1367         } while_for_each_ftrace_rec();
1368 }
1369
1370 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1371                                     int filter_hash)
1372 {
1373         __ftrace_hash_rec_update(ops, filter_hash, 0);
1374 }
1375
1376 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1377                                    int filter_hash)
1378 {
1379         __ftrace_hash_rec_update(ops, filter_hash, 1);
1380 }
1381
1382 static void ftrace_free_rec(struct dyn_ftrace *rec)
1383 {
1384         rec->freelist = ftrace_free_records;
1385         ftrace_free_records = rec;
1386         rec->flags |= FTRACE_FL_FREE;
1387 }
1388
1389 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1390 {
1391         struct dyn_ftrace *rec;
1392
1393         /* First check for freed records */
1394         if (ftrace_free_records) {
1395                 rec = ftrace_free_records;
1396
1397                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1398                         FTRACE_WARN_ON_ONCE(1);
1399                         ftrace_free_records = NULL;
1400                         return NULL;
1401                 }
1402
1403                 ftrace_free_records = rec->freelist;
1404                 memset(rec, 0, sizeof(*rec));
1405                 return rec;
1406         }
1407
1408         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1409                 if (!ftrace_pages->next) {
1410                         /* allocate another page */
1411                         ftrace_pages->next =
1412                                 (void *)get_zeroed_page(GFP_KERNEL);
1413                         if (!ftrace_pages->next)
1414                                 return NULL;
1415                 }
1416                 ftrace_pages = ftrace_pages->next;
1417         }
1418
1419         return &ftrace_pages->records[ftrace_pages->index++];
1420 }
1421
1422 static struct dyn_ftrace *
1423 ftrace_record_ip(unsigned long ip)
1424 {
1425         struct dyn_ftrace *rec;
1426
1427         if (ftrace_disabled)
1428                 return NULL;
1429
1430         rec = ftrace_alloc_dyn_node(ip);
1431         if (!rec)
1432                 return NULL;
1433
1434         rec->ip = ip;
1435         rec->newlist = ftrace_new_addrs;
1436         ftrace_new_addrs = rec;
1437
1438         return rec;
1439 }
1440
1441 static void print_ip_ins(const char *fmt, unsigned char *p)
1442 {
1443         int i;
1444
1445         printk(KERN_CONT "%s", fmt);
1446
1447         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1448                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1449 }
1450
1451 static void ftrace_bug(int failed, unsigned long ip)
1452 {
1453         switch (failed) {
1454         case -EFAULT:
1455                 FTRACE_WARN_ON_ONCE(1);
1456                 pr_info("ftrace faulted on modifying ");
1457                 print_ip_sym(ip);
1458                 break;
1459         case -EINVAL:
1460                 FTRACE_WARN_ON_ONCE(1);
1461                 pr_info("ftrace failed to modify ");
1462                 print_ip_sym(ip);
1463                 print_ip_ins(" actual: ", (unsigned char *)ip);
1464                 printk(KERN_CONT "\n");
1465                 break;
1466         case -EPERM:
1467                 FTRACE_WARN_ON_ONCE(1);
1468                 pr_info("ftrace faulted on writing ");
1469                 print_ip_sym(ip);
1470                 break;
1471         default:
1472                 FTRACE_WARN_ON_ONCE(1);
1473                 pr_info("ftrace faulted on unknown error ");
1474                 print_ip_sym(ip);
1475         }
1476 }
1477
1478
1479 /* Return 1 if the address range is reserved for ftrace */
1480 int ftrace_text_reserved(void *start, void *end)
1481 {
1482         struct dyn_ftrace *rec;
1483         struct ftrace_page *pg;
1484
1485         do_for_each_ftrace_rec(pg, rec) {
1486                 if (rec->ip <= (unsigned long)end &&
1487                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1488                         return 1;
1489         } while_for_each_ftrace_rec();
1490         return 0;
1491 }
1492
1493
1494 static int
1495 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1496 {
1497         unsigned long ftrace_addr;
1498         unsigned long flag = 0UL;
1499
1500         ftrace_addr = (unsigned long)FTRACE_ADDR;
1501
1502         /*
1503          * If we are enabling tracing:
1504          *
1505          *   If the record has a ref count, then we need to enable it
1506          *   because someone is using it.
1507          *
1508          *   Otherwise we make sure it is disabled.
1509          *
1510          * If we are disabling tracing, then disable all records that
1511          * are enabled.
1512          */
1513         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1514                 flag = FTRACE_FL_ENABLED;
1515
1516         /* If the state of this record hasn't changed, then do nothing */
1517         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1518                 return 0;
1519
1520         if (flag) {
1521                 rec->flags |= FTRACE_FL_ENABLED;
1522                 return ftrace_make_call(rec, ftrace_addr);
1523         }
1524
1525         rec->flags &= ~FTRACE_FL_ENABLED;
1526         return ftrace_make_nop(NULL, rec, ftrace_addr);
1527 }
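/*
 * In short: enabling a record with a non-zero ref count patches the site
 * into a call to FTRACE_ADDR (ftrace_make_call); disabling, or enabling a
 * record nobody references, patches an enabled site back to a nop
 * (ftrace_make_nop); and if the FTRACE_FL_ENABLED bit already matches the
 * desired state, nothing is touched.
 */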
1528
1529 static void ftrace_replace_code(int enable)
1530 {
1531         struct dyn_ftrace *rec;
1532         struct ftrace_page *pg;
1533         int failed;
1534
1535         if (unlikely(ftrace_disabled))
1536                 return;
1537
1538         do_for_each_ftrace_rec(pg, rec) {
1539                 /* Skip over free records */
1540                 if (rec->flags & FTRACE_FL_FREE)
1541                         continue;
1542
1543                 failed = __ftrace_replace_code(rec, enable);
1544                 if (failed) {
1545                         ftrace_bug(failed, rec->ip);
1546                         /* Stop processing */
1547                         return;
1548                 }
1549         } while_for_each_ftrace_rec();
1550 }
1551
1552 static int
1553 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1554 {
1555         unsigned long ip;
1556         int ret;
1557
1558         ip = rec->ip;
1559
1560         if (unlikely(ftrace_disabled))
1561                 return 0;
1562
1563         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1564         if (ret) {
1565                 ftrace_bug(ret, ip);
1566                 return 0;
1567         }
1568         return 1;
1569 }
1570
1571 /*
1572  * archs can override this function if they must do something
1573  * before the modifying code is performed.
1574  */
1575 int __weak ftrace_arch_code_modify_prepare(void)
1576 {
1577         return 0;
1578 }
1579
1580 /*
1581  * archs can override this function if they must do something
1582  * after the modifying code is performed.
1583  */
1584 int __weak ftrace_arch_code_modify_post_process(void)
1585 {
1586         return 0;
1587 }
1588
1589 static int __ftrace_modify_code(void *data)
1590 {
1591         int *command = data;
1592
1593         if (*command & FTRACE_ENABLE_CALLS)
1594                 ftrace_replace_code(1);
1595         else if (*command & FTRACE_DISABLE_CALLS)
1596                 ftrace_replace_code(0);
1597
1598         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1599                 ftrace_update_ftrace_func(ftrace_trace_function);
1600
1601         if (*command & FTRACE_START_FUNC_RET)
1602                 ftrace_enable_ftrace_graph_caller();
1603         else if (*command & FTRACE_STOP_FUNC_RET)
1604                 ftrace_disable_ftrace_graph_caller();
1605
1606         return 0;
1607 }
1608
1609 static void ftrace_run_update_code(int command)
1610 {
1611         int ret;
1612
1613         ret = ftrace_arch_code_modify_prepare();
1614         FTRACE_WARN_ON(ret);
1615         if (ret)
1616                 return;
1617
1618         stop_machine(__ftrace_modify_code, &command, NULL);
1619
1620         ret = ftrace_arch_code_modify_post_process();
1621         FTRACE_WARN_ON(ret);
1622 }
1623
1624 static ftrace_func_t saved_ftrace_func;
1625 static int ftrace_start_up;
1626 static int global_start_up;
1627
1628 static void ftrace_startup_enable(int command)
1629 {
1630         if (saved_ftrace_func != ftrace_trace_function) {
1631                 saved_ftrace_func = ftrace_trace_function;
1632                 command |= FTRACE_UPDATE_TRACE_FUNC;
1633         }
1634
1635         if (!command || !ftrace_enabled)
1636                 return;
1637
1638         ftrace_run_update_code(command);
1639 }
1640
1641 static void ftrace_startup(struct ftrace_ops *ops, int command)
1642 {
1643         bool hash_enable = true;
1644
1645         if (unlikely(ftrace_disabled))
1646                 return;
1647
1648         ftrace_start_up++;
1649         command |= FTRACE_ENABLE_CALLS;
1650
1651         /* ops marked global share the filter hashes */
1652         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1653                 ops = &global_ops;
1654                 /* Don't update hash if global is already set */
1655                 if (global_start_up)
1656                         hash_enable = false;
1657                 global_start_up++;
1658         }
1659
1660         ops->flags |= FTRACE_OPS_FL_ENABLED;
1661         if (hash_enable)
1662                 ftrace_hash_rec_enable(ops, 1);
1663
1664         ftrace_startup_enable(command);
1665 }
1666
1667 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1668 {
1669         bool hash_disable = true;
1670
1671         if (unlikely(ftrace_disabled))
1672                 return;
1673
1674         ftrace_start_up--;
1675         /*
1676          * Just warn in case of imbalance; there is no need to kill ftrace. It's
1677          * not critical, but the ftrace_call callers may never be nopped again
1678          * after further ftrace uses.
1679          */
1680         WARN_ON_ONCE(ftrace_start_up < 0);
1681
1682         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1683                 ops = &global_ops;
1684                 global_start_up--;
1685                 WARN_ON_ONCE(global_start_up < 0);
1686                 /* Don't update hash if global still has users */
1687                 if (global_start_up) {
1688                         WARN_ON_ONCE(!ftrace_start_up);
1689                         hash_disable = false;
1690                 }
1691         }
1692
1693         if (hash_disable)
1694                 ftrace_hash_rec_disable(ops, 1);
1695
1696         if (ops != &global_ops || !global_start_up)
1697                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1698
1699         if (!ftrace_start_up)
1700                 command |= FTRACE_DISABLE_CALLS;
1701
1702         if (saved_ftrace_func != ftrace_trace_function) {
1703                 saved_ftrace_func = ftrace_trace_function;
1704                 command |= FTRACE_UPDATE_TRACE_FUNC;
1705         }
1706
1707         if (!command || !ftrace_enabled)
1708                 return;
1709
1710         ftrace_run_update_code(command);
1711 }
1712
1713 static void ftrace_startup_sysctl(void)
1714 {
1715         if (unlikely(ftrace_disabled))
1716                 return;
1717
1718         /* Force update next time */
1719         saved_ftrace_func = NULL;
1720         /* ftrace_start_up is true if we want ftrace running */
1721         if (ftrace_start_up)
1722                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1723 }
1724
1725 static void ftrace_shutdown_sysctl(void)
1726 {
1727         if (unlikely(ftrace_disabled))
1728                 return;
1729
1730         /* ftrace_start_up is true if ftrace is running */
1731         if (ftrace_start_up)
1732                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1733 }
1734
1735 static cycle_t          ftrace_update_time;
1736 static unsigned long    ftrace_update_cnt;
1737 unsigned long           ftrace_update_tot_cnt;
1738
1739 static int ftrace_update_code(struct module *mod)
1740 {
1741         struct dyn_ftrace *p;
1742         cycle_t start, stop;
1743
1744         start = ftrace_now(raw_smp_processor_id());
1745         ftrace_update_cnt = 0;
1746
1747         while (ftrace_new_addrs) {
1748
1749                 /* If something went wrong, bail without enabling anything */
1750                 if (unlikely(ftrace_disabled))
1751                         return -1;
1752
1753                 p = ftrace_new_addrs;
1754                 ftrace_new_addrs = p->newlist;
1755                 p->flags = 0L;
1756
1757                 /*
1758                  * Do the initial record conversion from mcount jump
1759                  * to the NOP instructions.
1760                  */
1761                 if (!ftrace_code_disable(mod, p)) {
1762                         ftrace_free_rec(p);
1763                         /* Game over */
1764                         break;
1765                 }
1766
1767                 ftrace_update_cnt++;
1768
1769                 /*
1770                  * If the tracing is enabled, go ahead and enable the record.
1771                  *
1772                  * The reason not to enable the record immediately is the
1773                  * inherent check in ftrace_make_nop/ftrace_make_call for
1774                  * correct previous instructions.  Doing the NOP conversion
1775                  * first puts the module into the correct state, thus
1776                  * passing the ftrace_make_call check.
1777                  */
1778                 if (ftrace_start_up) {
1779                         int failed = __ftrace_replace_code(p, 1);
1780                         if (failed) {
1781                                 ftrace_bug(failed, p->ip);
1782                                 ftrace_free_rec(p);
1783                         }
1784                 }
1785         }
1786
1787         stop = ftrace_now(raw_smp_processor_id());
1788         ftrace_update_time = stop - start;
1789         ftrace_update_tot_cnt += ftrace_update_cnt;
1790
1791         return 0;
1792 }
1793
1794 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1795 {
1796         struct ftrace_page *pg;
1797         int cnt;
1798         int i;
1799
1800         /* allocate a few pages */
1801         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1802         if (!ftrace_pages_start)
1803                 return -1;
1804
1805         /*
1806          * Allocate a few more pages.
1807          *
1808          * TODO: have some parser search vmlinux before
1809          *   final linking to find all calls to ftrace.
1810          *   Then we can:
1811          *    a) know how many pages to allocate.
1812          *     and/or
1813          *    b) set up the table then.
1814          *
1815          *  The dynamic code is still necessary for
1816          *  modules.
1817          */
1818
1819         pg = ftrace_pages = ftrace_pages_start;
1820
1821         cnt = num_to_init / ENTRIES_PER_PAGE;
1822         pr_info("ftrace: allocating %ld entries in %d pages\n",
1823                 num_to_init, cnt + 1);
1824
1825         for (i = 0; i < cnt; i++) {
1826                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1827
1828                 /* If we fail, we'll try later anyway */
1829                 if (!pg->next)
1830                         break;
1831
1832                 pg = pg->next;
1833         }
1834
1835         return 0;
1836 }
1837
1838 enum {
1839         FTRACE_ITER_FILTER      = (1 << 0),
1840         FTRACE_ITER_NOTRACE     = (1 << 1),
1841         FTRACE_ITER_PRINTALL    = (1 << 2),
1842         FTRACE_ITER_HASH        = (1 << 3),
1843         FTRACE_ITER_ENABLED     = (1 << 4),
1844 };
1845
1846 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1847
1848 struct ftrace_iterator {
1849         loff_t                          pos;
1850         loff_t                          func_pos;
1851         struct ftrace_page              *pg;
1852         struct dyn_ftrace               *func;
1853         struct ftrace_func_probe        *probe;
1854         struct trace_parser             parser;
1855         struct ftrace_hash              *hash;
1856         struct ftrace_ops               *ops;
1857         int                             hidx;
1858         int                             idx;
1859         unsigned                        flags;
1860 };
1861
1862 static void *
1863 t_hash_next(struct seq_file *m, loff_t *pos)
1864 {
1865         struct ftrace_iterator *iter = m->private;
1866         struct hlist_node *hnd = NULL;
1867         struct hlist_head *hhd;
1868
1869         (*pos)++;
1870         iter->pos = *pos;
1871
1872         if (iter->probe)
1873                 hnd = &iter->probe->node;
1874  retry:
1875         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1876                 return NULL;
1877
1878         hhd = &ftrace_func_hash[iter->hidx];
1879
1880         if (hlist_empty(hhd)) {
1881                 iter->hidx++;
1882                 hnd = NULL;
1883                 goto retry;
1884         }
1885
1886         if (!hnd)
1887                 hnd = hhd->first;
1888         else {
1889                 hnd = hnd->next;
1890                 if (!hnd) {
1891                         iter->hidx++;
1892                         goto retry;
1893                 }
1894         }
1895
1896         if (WARN_ON_ONCE(!hnd))
1897                 return NULL;
1898
1899         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1900
1901         return iter;
1902 }
1903
1904 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1905 {
1906         struct ftrace_iterator *iter = m->private;
1907         void *p = NULL;
1908         loff_t l;
1909
1910         if (iter->func_pos > *pos)
1911                 return NULL;
1912
1913         iter->hidx = 0;
1914         for (l = 0; l <= (*pos - iter->func_pos); ) {
1915                 p = t_hash_next(m, &l);
1916                 if (!p)
1917                         break;
1918         }
1919         if (!p)
1920                 return NULL;
1921
1922         /* Only set this if we have an item */
1923         iter->flags |= FTRACE_ITER_HASH;
1924
1925         return iter;
1926 }
1927
1928 static int
1929 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1930 {
1931         struct ftrace_func_probe *rec;
1932
1933         rec = iter->probe;
1934         if (WARN_ON_ONCE(!rec))
1935                 return -EIO;
1936
1937         if (rec->ops->print)
1938                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1939
1940         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1941
1942         if (rec->data)
1943                 seq_printf(m, ":%p", rec->data);
1944         seq_putc(m, '\n');
1945
1946         return 0;
1947 }
1948
1949 static void *
1950 t_next(struct seq_file *m, void *v, loff_t *pos)
1951 {
1952         struct ftrace_iterator *iter = m->private;
1953         struct ftrace_ops *ops = &global_ops;
1954         struct dyn_ftrace *rec = NULL;
1955
1956         if (unlikely(ftrace_disabled))
1957                 return NULL;
1958
1959         if (iter->flags & FTRACE_ITER_HASH)
1960                 return t_hash_next(m, pos);
1961
1962         (*pos)++;
1963         iter->pos = iter->func_pos = *pos;
1964
1965         if (iter->flags & FTRACE_ITER_PRINTALL)
1966                 return t_hash_start(m, pos);
1967
1968  retry:
1969         if (iter->idx >= iter->pg->index) {
1970                 if (iter->pg->next) {
1971                         iter->pg = iter->pg->next;
1972                         iter->idx = 0;
1973                         goto retry;
1974                 }
1975         } else {
1976                 rec = &iter->pg->records[iter->idx++];
1977                 if ((rec->flags & FTRACE_FL_FREE) ||
1978
1979                     ((iter->flags & FTRACE_ITER_FILTER) &&
1980                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
1981
1982                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
1983                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
1984
1985                     ((iter->flags & FTRACE_ITER_ENABLED) &&
1986                      !(rec->flags & ~FTRACE_FL_MASK))) {
1987
1988                         rec = NULL;
1989                         goto retry;
1990                 }
1991         }
1992
1993         if (!rec)
1994                 return t_hash_start(m, pos);
1995
1996         iter->func = rec;
1997
1998         return iter;
1999 }
2000
2001 static void reset_iter_read(struct ftrace_iterator *iter)
2002 {
2003         iter->pos = 0;
2004         iter->func_pos = 0;
2005         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2006 }
2007
2008 static void *t_start(struct seq_file *m, loff_t *pos)
2009 {
2010         struct ftrace_iterator *iter = m->private;
2011         struct ftrace_ops *ops = &global_ops;
2012         void *p = NULL;
2013         loff_t l;
2014
2015         mutex_lock(&ftrace_lock);
2016
2017         if (unlikely(ftrace_disabled))
2018                 return NULL;
2019
2020         /*
2021          * If an lseek was done, then reset and start from the beginning.
2022          */
2023         if (*pos < iter->pos)
2024                 reset_iter_read(iter);
2025
2026         /*
2027          * For set_ftrace_filter reading, if we have the filter
2028          * off, we can take a shortcut and just print out that all
2029          * functions are enabled.
2030          */
2031         if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
2032                 if (*pos > 0)
2033                         return t_hash_start(m, pos);
2034                 iter->flags |= FTRACE_ITER_PRINTALL;
2035                 /* reset in case of seek/pread */
2036                 iter->flags &= ~FTRACE_ITER_HASH;
2037                 return iter;
2038         }
2039
2040         if (iter->flags & FTRACE_ITER_HASH)
2041                 return t_hash_start(m, pos);
2042
2043         /*
2044          * Unfortunately, we need to restart at ftrace_pages_start
2045          * every time we let go of the ftrace_lock. This is because
2046          * those pointers can change without the lock.
2047          */
2048         iter->pg = ftrace_pages_start;
2049         iter->idx = 0;
2050         for (l = 0; l <= *pos; ) {
2051                 p = t_next(m, p, &l);
2052                 if (!p)
2053                         break;
2054         }
2055
2056         if (!p) {
2057                 if (iter->flags & FTRACE_ITER_FILTER)
2058                         return t_hash_start(m, pos);
2059
2060                 return NULL;
2061         }
2062
2063         return iter;
2064 }
2065
2066 static void t_stop(struct seq_file *m, void *p)
2067 {
2068         mutex_unlock(&ftrace_lock);
2069 }
2070
2071 static int t_show(struct seq_file *m, void *v)
2072 {
2073         struct ftrace_iterator *iter = m->private;
2074         struct dyn_ftrace *rec;
2075
2076         if (iter->flags & FTRACE_ITER_HASH)
2077                 return t_hash_show(m, iter);
2078
2079         if (iter->flags & FTRACE_ITER_PRINTALL) {
2080                 seq_printf(m, "#### all functions enabled ####\n");
2081                 return 0;
2082         }
2083
2084         rec = iter->func;
2085
2086         if (!rec)
2087                 return 0;
2088
2089         seq_printf(m, "%ps", (void *)rec->ip);
2090         if (iter->flags & FTRACE_ITER_ENABLED)
2091                 seq_printf(m, " (%ld)",
2092                            rec->flags & ~FTRACE_FL_MASK);
2093         seq_printf(m, "\n");
2094
2095         return 0;
2096 }
2097
2098 static const struct seq_operations show_ftrace_seq_ops = {
2099         .start = t_start,
2100         .next = t_next,
2101         .stop = t_stop,
2102         .show = t_show,
2103 };
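
/*
 * A reminder of the seq_file flow these callbacks follow (generic seq_file
 * behaviour, noted here for readability): each read calls t_start(), then
 * alternates t_show()/t_next() until t_next() returns NULL, and always
 * finishes with t_stop(), which drops ftrace_lock.
 */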
2104
2105 static int
2106 ftrace_avail_open(struct inode *inode, struct file *file)
2107 {
2108         struct ftrace_iterator *iter;
2109         int ret;
2110
2111         if (unlikely(ftrace_disabled))
2112                 return -ENODEV;
2113
2114         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2115         if (!iter)
2116                 return -ENOMEM;
2117
2118         iter->pg = ftrace_pages_start;
2119
2120         ret = seq_open(file, &show_ftrace_seq_ops);
2121         if (!ret) {
2122                 struct seq_file *m = file->private_data;
2123
2124                 m->private = iter;
2125         } else {
2126                 kfree(iter);
2127         }
2128
2129         return ret;
2130 }
2131
2132 static int
2133 ftrace_enabled_open(struct inode *inode, struct file *file)
2134 {
2135         struct ftrace_iterator *iter;
2136         int ret;
2137
2138         if (unlikely(ftrace_disabled))
2139                 return -ENODEV;
2140
2141         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2142         if (!iter)
2143                 return -ENOMEM;
2144
2145         iter->pg = ftrace_pages_start;
2146         iter->flags = FTRACE_ITER_ENABLED;
2147
2148         ret = seq_open(file, &show_ftrace_seq_ops);
2149         if (!ret) {
2150                 struct seq_file *m = file->private_data;
2151
2152                 m->private = iter;
2153         } else {
2154                 kfree(iter);
2155         }
2156
2157         return ret;
2158 }
2159
2160 static void ftrace_filter_reset(struct ftrace_hash *hash)
2161 {
2162         mutex_lock(&ftrace_lock);
2163         ftrace_hash_clear(hash);
2164         mutex_unlock(&ftrace_lock);
2165 }
2166
2167 static int
2168 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2169                   struct inode *inode, struct file *file)
2170 {
2171         struct ftrace_iterator *iter;
2172         struct ftrace_hash *hash;
2173         int ret = 0;
2174
2175         if (unlikely(ftrace_disabled))
2176                 return -ENODEV;
2177
2178         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2179         if (!iter)
2180                 return -ENOMEM;
2181
2182         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2183                 kfree(iter);
2184                 return -ENOMEM;
2185         }
2186
2187         if (flag & FTRACE_ITER_NOTRACE)
2188                 hash = ops->notrace_hash;
2189         else
2190                 hash = ops->filter_hash;
2191
2192         iter->ops = ops;
2193         iter->flags = flag;
2194
2195         if (file->f_mode & FMODE_WRITE) {
2196                 mutex_lock(&ftrace_lock);
2197                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2198                 mutex_unlock(&ftrace_lock);
2199
2200                 if (!iter->hash) {
2201                         trace_parser_put(&iter->parser);
2202                         kfree(iter);
2203                         return -ENOMEM;
2204                 }
2205         }
2206
2207         mutex_lock(&ftrace_regex_lock);
2208
2209         if ((file->f_mode & FMODE_WRITE) &&
2210             (file->f_flags & O_TRUNC))
2211                 ftrace_filter_reset(iter->hash);
2212
2213         if (file->f_mode & FMODE_READ) {
2214                 iter->pg = ftrace_pages_start;
2215
2216                 ret = seq_open(file, &show_ftrace_seq_ops);
2217                 if (!ret) {
2218                         struct seq_file *m = file->private_data;
2219                         m->private = iter;
2220                 } else {
2221                         /* Failed */
2222                         free_ftrace_hash(iter->hash);
2223                         trace_parser_put(&iter->parser);
2224                         kfree(iter);
2225                 }
2226         } else
2227                 file->private_data = iter;
2228         mutex_unlock(&ftrace_regex_lock);
2229
2230         return ret;
2231 }
2232
2233 static int
2234 ftrace_filter_open(struct inode *inode, struct file *file)
2235 {
2236         return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2237                                  inode, file);
2238 }
2239
2240 static int
2241 ftrace_notrace_open(struct inode *inode, struct file *file)
2242 {
2243         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2244                                  inode, file);
2245 }
2246
2247 static loff_t
2248 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2249 {
2250         loff_t ret;
2251
2252         if (file->f_mode & FMODE_READ)
2253                 ret = seq_lseek(file, offset, origin);
2254         else
2255                 file->f_pos = ret = 1;
2256
2257         return ret;
2258 }
2259
2260 static int ftrace_match(char *str, char *regex, int len, int type)
2261 {
2262         int matched = 0;
2263         int slen;
2264
2265         switch (type) {
2266         case MATCH_FULL:
2267                 if (strcmp(str, regex) == 0)
2268                         matched = 1;
2269                 break;
2270         case MATCH_FRONT_ONLY:
2271                 if (strncmp(str, regex, len) == 0)
2272                         matched = 1;
2273                 break;
2274         case MATCH_MIDDLE_ONLY:
2275                 if (strstr(str, regex))
2276                         matched = 1;
2277                 break;
2278         case MATCH_END_ONLY:
2279                 slen = strlen(str);
2280                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2281                         matched = 1;
2282                 break;
2283         }
2284
2285         return matched;
2286 }
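
/*
 * Sketch of how the match types relate to the glob strings handed to
 * filter_parse_regex() (illustrative; assumes the usual glob handling):
 *
 *	"schedule"	MATCH_FULL		exact match on "schedule"
 *	"sched_*"	MATCH_FRONT_ONLY	prefix match on "sched_"
 *	"*_lock"	MATCH_END_ONLY		suffix match on "_lock"
 *	"*spin*"	MATCH_MIDDLE_ONLY	substring match on "spin"
 */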
2287
2288 static int
2289 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2290 {
2291         struct ftrace_func_entry *entry;
2292         int ret = 0;
2293
2294         entry = ftrace_lookup_ip(hash, rec->ip);
2295         if (not) {
2296                 /* Do nothing if it doesn't exist */
2297                 if (!entry)
2298                         return 0;
2299
2300                 free_hash_entry(hash, entry);
2301         } else {
2302                 /* Do nothing if it exists */
2303                 if (entry)
2304                         return 0;
2305
2306                 ret = add_hash_entry(hash, rec->ip);
2307         }
2308         return ret;
2309 }
2310
2311 static int
2312 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2313                     char *regex, int len, int type)
2314 {
2315         char str[KSYM_SYMBOL_LEN];
2316         char *modname;
2317
2318         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2319
2320         if (mod) {
2321                 /* module lookup requires matching the module */
2322                 if (!modname || strcmp(modname, mod))
2323                         return 0;
2324
2325                 /* blank search means to match all funcs in the mod */
2326                 if (!len)
2327                         return 1;
2328         }
2329
2330         return ftrace_match(str, regex, len, type);
2331 }
2332
2333 static int
2334 match_records(struct ftrace_hash *hash, char *buff,
2335               int len, char *mod, int not)
2336 {
2337         unsigned search_len = 0;
2338         struct ftrace_page *pg;
2339         struct dyn_ftrace *rec;
2340         int type = MATCH_FULL;
2341         char *search = buff;
2342         int found = 0;
2343         int ret;
2344
2345         if (len) {
2346                 type = filter_parse_regex(buff, len, &search, &not);
2347                 search_len = strlen(search);
2348         }
2349
2350         mutex_lock(&ftrace_lock);
2351
2352         if (unlikely(ftrace_disabled))
2353                 goto out_unlock;
2354
2355         do_for_each_ftrace_rec(pg, rec) {
2356
2357                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2358                         ret = enter_record(hash, rec, not);
2359                         if (ret < 0) {
2360                                 found = ret;
2361                                 goto out_unlock;
2362                         }
2363                         found = 1;
2364                 }
2365         } while_for_each_ftrace_rec();
2366  out_unlock:
2367         mutex_unlock(&ftrace_lock);
2368
2369         return found;
2370 }
2371
2372 static int
2373 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2374 {
2375         return match_records(hash, buff, len, NULL, 0);
2376 }
2377
2378 static int
2379 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2380 {
2381         int not = 0;
2382
2383         /* blank or '*' mean the same */
2384         if (strcmp(buff, "*") == 0)
2385                 buff[0] = 0;
2386
2387         /* handle the case of 'don't filter this module' */
2388         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2389                 buff[0] = 0;
2390                 not = 1;
2391         }
2392
2393         return match_records(hash, buff, strlen(buff), mod, not);
2394 }
2395
2396 /*
2397  * We register the module command as a template to show others how
2398  * to register a command as well (see the sketch below the initcall).
2399  */
2400
2401 static int
2402 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
2403 {
2404         struct ftrace_ops *ops = &global_ops;
2405         struct ftrace_hash *hash;
2406         char *mod;
2407         int ret = -EINVAL;
2408
2409         /*
2410          * cmd == 'mod' because we only registered this func
2411          * for the 'mod' ftrace_func_command.
2412          * But if you register one func with multiple commands,
2413          * you can tell which command was used by the cmd
2414          * parameter.
2415          */
2416
2417         /* we must have a module name */
2418         if (!param)
2419                 return ret;
2420
2421         mod = strsep(&param, ":");
2422         if (!strlen(mod))
2423                 return ret;
2424
2425         if (enable)
2426                 hash = ops->filter_hash;
2427         else
2428                 hash = ops->notrace_hash;
2429
2430         ret = ftrace_match_module_records(hash, func, mod);
2431         if (!ret)
2432                 ret = -EINVAL;
2433         if (ret < 0)
2434                 return ret;
2435
2436         return 0;
2437 }
2438
2439 static struct ftrace_func_command ftrace_mod_cmd = {
2440         .name                   = "mod",
2441         .func                   = ftrace_mod_callback,
2442 };
2443
2444 static int __init ftrace_mod_cmd_init(void)
2445 {
2446         return register_ftrace_command(&ftrace_mod_cmd);
2447 }
2448 device_initcall(ftrace_mod_cmd_init);
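
/*
 * Illustrative sketch (hypothetical names, not part of ftrace itself): a
 * new command can be wired up the same way as the "mod" template above.
 * The callback receives the function pattern, the command name, and
 * anything after the second ':' of input such as "foo*:mycmd:arg".
 *
 *	static int my_cmd_callback(char *func, char *cmd,
 *				   char *param, int enable)
 *	{
 *		return 0;	/* act on "func"/"param" here */
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 * and then register_ftrace_command(&my_cmd) from an init path, with a
 * matching unregister_ftrace_command(&my_cmd) on teardown.
 */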
2449
2450 static void
2451 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2452 {
2453         struct ftrace_func_probe *entry;
2454         struct hlist_head *hhd;
2455         struct hlist_node *n;
2456         unsigned long key;
2457
2458         key = hash_long(ip, FTRACE_HASH_BITS);
2459
2460         hhd = &ftrace_func_hash[key];
2461
2462         if (hlist_empty(hhd))
2463                 return;
2464
2465         /*
2466          * Disable preemption for these calls to prevent an RCU grace
2467          * period. This syncs the hash iteration and freeing of items
2468          * on the hash. rcu_read_lock is too dangerous here.
2469          */
2470         preempt_disable_notrace();
2471         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2472                 if (entry->ip == ip)
2473                         entry->ops->func(ip, parent_ip, &entry->data);
2474         }
2475         preempt_enable_notrace();
2476 }
2477
2478 static struct ftrace_ops trace_probe_ops __read_mostly =
2479 {
2480         .func           = function_trace_probe_call,
2481 };
2482
2483 static int ftrace_probe_registered;
2484
2485 static void __enable_ftrace_function_probe(void)
2486 {
2487         int ret;
2488         int i;
2489
2490         if (ftrace_probe_registered)
2491                 return;
2492
2493         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2494                 struct hlist_head *hhd = &ftrace_func_hash[i];
2495                 if (hhd->first)
2496                         break;
2497         }
2498         /* Nothing registered? */
2499         if (i == FTRACE_FUNC_HASHSIZE)
2500                 return;
2501
2502         ret = __register_ftrace_function(&trace_probe_ops);
2503         if (!ret)
2504                 ftrace_startup(&trace_probe_ops, 0);
2505
2506         ftrace_probe_registered = 1;
2507 }
2508
2509 static void __disable_ftrace_function_probe(void)
2510 {
2511         int ret;
2512         int i;
2513
2514         if (!ftrace_probe_registered)
2515                 return;
2516
2517         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2518                 struct hlist_head *hhd = &ftrace_func_hash[i];
2519                 if (hhd->first)
2520                         return;
2521         }
2522
2523         /* no more funcs left */
2524         ret = __unregister_ftrace_function(&trace_probe_ops);
2525         if (!ret)
2526                 ftrace_shutdown(&trace_probe_ops, 0);
2527
2528         ftrace_probe_registered = 0;
2529 }
2530
2531
2532 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2533 {
2534         struct ftrace_func_probe *entry =
2535                 container_of(rhp, struct ftrace_func_probe, rcu);
2536
2537         if (entry->ops->free)
2538                 entry->ops->free(&entry->data);
2539         kfree(entry);
2540 }
2541
2542
2543 int
2544 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2545                               void *data)
2546 {
2547         struct ftrace_func_probe *entry;
2548         struct ftrace_page *pg;
2549         struct dyn_ftrace *rec;
2550         int type, len, not;
2551         unsigned long key;
2552         int count = 0;
2553         char *search;
2554
2555         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2556         len = strlen(search);
2557
2558         /* we do not support '!' for function probes */
2559         if (WARN_ON(not))
2560                 return -EINVAL;
2561
2562         mutex_lock(&ftrace_lock);
2563
2564         if (unlikely(ftrace_disabled))
2565                 goto out_unlock;
2566
2567         do_for_each_ftrace_rec(pg, rec) {
2568
2569                 if (!ftrace_match_record(rec, NULL, search, len, type))
2570                         continue;
2571
2572                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2573                 if (!entry) {
2574                         /* If we did not process any, then return error */
2575                         if (!count)
2576                                 count = -ENOMEM;
2577                         goto out_unlock;
2578                 }
2579
2580                 count++;
2581
2582                 entry->data = data;
2583
2584                 /*
2585                  * The caller might want to do something special
2586                  * for each function we find. We call the callback
2587                  * to give the caller an opportunity to do so.
2588                  */
2589                 if (ops->callback) {
2590                         if (ops->callback(rec->ip, &entry->data) < 0) {
2591                                 /* caller does not like this func */
2592                                 kfree(entry);
2593                                 continue;
2594                         }
2595                 }
2596
2597                 entry->ops = ops;
2598                 entry->ip = rec->ip;
2599
2600                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2601                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2602
2603         } while_for_each_ftrace_rec();
2604         __enable_ftrace_function_probe();
2605
2606  out_unlock:
2607         mutex_unlock(&ftrace_lock);
2608
2609         return count;
2610 }
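
/*
 * Illustrative sketch (hypothetical names, not from this file): a caller
 * that wants a probe to fire on every function matching a glob could do
 * roughly the following, using the ftrace_probe_ops callbacks referenced
 * above (.func, .callback, .free, .print):
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		/* runs from function_trace_probe_call() on a hit */
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 */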
2611
2612 enum {
2613         PROBE_TEST_FUNC         = 1,
2614         PROBE_TEST_DATA         = 2
2615 };
2616
2617 static void
2618 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2619                                   void *data, int flags)
2620 {
2621         struct ftrace_func_probe *entry;
2622         struct hlist_node *n, *tmp;
2623         char str[KSYM_SYMBOL_LEN];
2624         int type = MATCH_FULL;
2625         int i, len = 0;
2626         char *search;
2627
2628         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2629                 glob = NULL;
2630         else if (glob) {
2631                 int not;
2632
2633                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2634                 len = strlen(search);
2635
2636                 /* we do not support '!' for function probes */
2637                 if (WARN_ON(not))
2638                         return;
2639         }
2640
2641         mutex_lock(&ftrace_lock);
2642         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2643                 struct hlist_head *hhd = &ftrace_func_hash[i];
2644
2645                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2646
2647                         /* break up if statements for readability */
2648                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2649                                 continue;
2650
2651                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2652                                 continue;
2653
2654                         /* do this last, since it is the most expensive */
2655                         if (glob) {
2656                                 kallsyms_lookup(entry->ip, NULL, NULL,
2657                                                 NULL, str);
2658                                 if (!ftrace_match(str, glob, len, type))
2659                                         continue;
2660                         }
2661
2662                         hlist_del(&entry->node);
2663                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2664                 }
2665         }
2666         __disable_ftrace_function_probe();
2667         mutex_unlock(&ftrace_lock);
2668 }
2669
2670 void
2671 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2672                                 void *data)
2673 {
2674         __unregister_ftrace_function_probe(glob, ops, data,
2675                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2676 }
2677
2678 void
2679 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2680 {
2681         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2682 }
2683
2684 void unregister_ftrace_function_probe_all(char *glob)
2685 {
2686         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2687 }
2688
2689 static LIST_HEAD(ftrace_commands);
2690 static DEFINE_MUTEX(ftrace_cmd_mutex);
2691
2692 int register_ftrace_command(struct ftrace_func_command *cmd)
2693 {
2694         struct ftrace_func_command *p;
2695         int ret = 0;
2696
2697         mutex_lock(&ftrace_cmd_mutex);
2698         list_for_each_entry(p, &ftrace_commands, list) {
2699                 if (strcmp(cmd->name, p->name) == 0) {
2700                         ret = -EBUSY;
2701                         goto out_unlock;
2702                 }
2703         }
2704         list_add(&cmd->list, &ftrace_commands);
2705  out_unlock:
2706         mutex_unlock(&ftrace_cmd_mutex);
2707
2708         return ret;
2709 }
2710
2711 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2712 {
2713         struct ftrace_func_command *p, *n;
2714         int ret = -ENODEV;
2715
2716         mutex_lock(&ftrace_cmd_mutex);
2717         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2718                 if (strcmp(cmd->name, p->name) == 0) {
2719                         ret = 0;
2720                         list_del_init(&p->list);
2721                         goto out_unlock;
2722                 }
2723         }
2724  out_unlock:
2725         mutex_unlock(&ftrace_cmd_mutex);
2726
2727         return ret;
2728 }
2729
2730 static int ftrace_process_regex(struct ftrace_hash *hash,
2731                                 char *buff, int len, int enable)
2732 {
2733         char *func, *command, *next = buff;
2734         struct ftrace_func_command *p;
2735         int ret = -EINVAL;
2736
2737         func = strsep(&next, ":");
2738
2739         if (!next) {
2740                 ret = ftrace_match_records(hash, func, len);
2741                 if (!ret)
2742                         ret = -EINVAL;
2743                 if (ret < 0)
2744                         return ret;
2745                 return 0;
2746         }
2747
2748         /* command found */
2749
2750         command = strsep(&next, ":");
2751
2752         mutex_lock(&ftrace_cmd_mutex);
2753         list_for_each_entry(p, &ftrace_commands, list) {
2754                 if (strcmp(p->name, command) == 0) {
2755                         ret = p->func(func, command, next, enable);
2756                         goto out_unlock;
2757                 }
2758         }
2759  out_unlock:
2760         mutex_unlock(&ftrace_cmd_mutex);
2761
2762         return ret;
2763 }
2764
2765 static ssize_t
2766 ftrace_regex_write(struct file *file, const char __user *ubuf,
2767                    size_t cnt, loff_t *ppos, int enable)
2768 {
2769         struct ftrace_iterator *iter;
2770         struct trace_parser *parser;
2771         ssize_t ret, read;
2772
2773         if (!cnt)
2774                 return 0;
2775
2776         mutex_lock(&ftrace_regex_lock);
2777
2778         ret = -ENODEV;
2779         if (unlikely(ftrace_disabled))
2780                 goto out_unlock;
2781
2782         if (file->f_mode & FMODE_READ) {
2783                 struct seq_file *m = file->private_data;
2784                 iter = m->private;
2785         } else
2786                 iter = file->private_data;
2787
2788         parser = &iter->parser;
2789         read = trace_get_user(parser, ubuf, cnt, ppos);
2790
2791         if (read >= 0 && trace_parser_loaded(parser) &&
2792             !trace_parser_cont(parser)) {
2793                 ret = ftrace_process_regex(iter->hash, parser->buffer,
2794                                            parser->idx, enable);
2795                 trace_parser_clear(parser);
2796                 if (ret)
2797                         goto out_unlock;
2798         }
2799
2800         ret = read;
2801 out_unlock:
2802         mutex_unlock(&ftrace_regex_lock);
2803
2804         return ret;
2805 }
2806
2807 static ssize_t
2808 ftrace_filter_write(struct file *file, const char __user *ubuf,
2809                     size_t cnt, loff_t *ppos)
2810 {
2811         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2812 }
2813
2814 static ssize_t
2815 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2816                      size_t cnt, loff_t *ppos)
2817 {
2818         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2819 }
2820
2821 static int
2822 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2823                  int reset, int enable)
2824 {
2825         struct ftrace_hash **orig_hash;
2826         struct ftrace_hash *hash;
2827         int ret;
2828
2829         /* All global ops use the global ops filters */
2830         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2831                 ops = &global_ops;
2832
2833         if (unlikely(ftrace_disabled))
2834                 return -ENODEV;
2835
2836         if (enable)
2837                 orig_hash = &ops->filter_hash;
2838         else
2839                 orig_hash = &ops->notrace_hash;
2840
2841         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2842         if (!hash)
2843                 return -ENOMEM;
2844
2845         mutex_lock(&ftrace_regex_lock);
2846         if (reset)
2847                 ftrace_filter_reset(hash);
2848         if (buf)
2849                 ftrace_match_records(hash, buf, len);
2850
2851         mutex_lock(&ftrace_lock);
2852         ret = ftrace_hash_move(orig_hash, hash);
2853         mutex_unlock(&ftrace_lock);
2854
2855         mutex_unlock(&ftrace_regex_lock);
2856
2857         free_ftrace_hash(hash);
2858         return ret;
2859 }
2860
2861 /**
2862  * ftrace_set_filter - set a function to filter on in ftrace
2863  * @ops - the ops to set the filter with
2864  * @buf - the string that holds the function filter text.
2865  * @len - the length of the string.
2866  * @reset - non zero to reset all filters before applying this filter.
2867  *
2868  * Filters denote which functions should be enabled when tracing is enabled.
2869  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2870  */
2871 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2872                        int len, int reset)
2873 {
2874         ftrace_set_regex(ops, buf, len, reset, 1);
2875 }
2876 EXPORT_SYMBOL_GPL(ftrace_set_filter);
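
/*
 * Minimal usage sketch (illustrative, with hypothetical names): a tracer
 * that owns its own ftrace_ops can narrow it to a set of functions
 * before registering it:
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_callback,
 *	};
 *
 *	char buf[] = "kmalloc*";
 *	ftrace_set_filter(&my_ops, (unsigned char *)buf, strlen(buf), 1);
 *
 * A later register_ftrace_function(&my_ops) would then only trace
 * functions matching "kmalloc*".
 */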
2877
2878 /**
2879  * ftrace_set_notrace - set a function to not trace in ftrace
2880  * @ops - the ops to set the notrace filter with
2881  * @buf - the string that holds the function notrace text.
2882  * @len - the length of the string.
2883  * @reset - non zero to reset all filters before applying this filter.
2884  *
2885  * Notrace Filters denote which functions should not be enabled when tracing
2886  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2887  * for tracing.
2888  */
2889 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2890                         int len, int reset)
2891 {
2892         ftrace_set_regex(ops, buf, len, reset, 0);
2893 }
2894 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
2895 /**
2896  * ftrace_set_global_filter - set a function to filter on with global tracers
2898  * @buf - the string that holds the function filter text.
2899  * @len - the length of the string.
2900  * @reset - non zero to reset all filters before applying this filter.
2901  *
2902  * Filters denote which functions should be enabled when tracing is enabled.
2903  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2904  */
2905 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2906 {
2907         ftrace_set_regex(&global_ops, buf, len, reset, 1);
2908 }
2909 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2910
2911 /**
2912  * ftrace_set_global_notrace - set a function to not trace with global tracers
2914  * @buf - the string that holds the function notrace text.
2915  * @len - the length of the string.
2916  * @reset - non zero to reset all filters before applying this filter.
2917  *
2918  * Notrace Filters denote which functions should not be enabled when tracing
2919  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2920  * for tracing.
2921  */
2922 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
2923 {
2924         ftrace_set_regex(&global_ops, buf, len, reset, 0);
2925 }
2926 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
2927
2928 /*
2929  * command line interface to allow users to set filters on boot up.
2930  */
2931 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2932 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2933 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2934
2935 static int __init set_ftrace_notrace(char *str)
2936 {
2937         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2938         return 1;
2939 }
2940 __setup("ftrace_notrace=", set_ftrace_notrace);
2941
2942 static int __init set_ftrace_filter(char *str)
2943 {
2944         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2945         return 1;
2946 }
2947 __setup("ftrace_filter=", set_ftrace_filter);
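
/*
 * Example boot-time usage (illustrative): the lists are comma separated
 * and accept the same globs as set_ftrace_filter, e.g.
 *
 *	ftrace_filter=kmem_cache_*,kmalloc*  ftrace_notrace=*spin_lock*
 *
 * When the graph tracer is built in, ftrace_graph_filter=<func> below
 * seeds set_graph_function the same way.
 */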
2948
2949 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2950 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2951 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2952
2953 static int __init set_graph_function(char *str)
2954 {
2955         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2956         return 1;
2957 }
2958 __setup("ftrace_graph_filter=", set_graph_function);
2959
2960 static void __init set_ftrace_early_graph(char *buf)
2961 {
2962         int ret;
2963         char *func;
2964
2965         while (buf) {
2966                 func = strsep(&buf, ",");
2967                 /* we allow only one expression at a time */
2968                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2969                                       func);
2970                 if (ret)
2971                         printk(KERN_DEBUG "ftrace: function %s not "
2972                                           "traceable\n", func);
2973         }
2974 }
2975 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2976
2977 static void __init
2978 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2979 {
2980         char *func;
2981
2982         while (buf) {
2983                 func = strsep(&buf, ",");
2984                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2985         }
2986 }
2987
2988 static void __init set_ftrace_early_filters(void)
2989 {
2990         if (ftrace_filter_buf[0])
2991                 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
2992         if (ftrace_notrace_buf[0])
2993                 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
2994 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2995         if (ftrace_graph_buf[0])
2996                 set_ftrace_early_graph(ftrace_graph_buf);
2997 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2998 }
2999
3000 static int
3001 ftrace_regex_release(struct inode *inode, struct file *file)
3002 {
3003         struct seq_file *m = (struct seq_file *)file->private_data;
3004         struct ftrace_iterator *iter;
3005         struct ftrace_hash **orig_hash;
3006         struct trace_parser *parser;
3007         int filter_hash;
3008         int ret;
3009
3010         mutex_lock(&ftrace_regex_lock);
3011         if (file->f_mode & FMODE_READ) {
3012                 iter = m->private;
3013
3014                 seq_release(inode, file);
3015         } else
3016                 iter = file->private_data;
3017
3018         parser = &iter->parser;
3019         if (trace_parser_loaded(parser)) {
3020                 parser->buffer[parser->idx] = 0;
3021                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3022         }
3023
3024         trace_parser_put(parser);
3025
3026         if (file->f_mode & FMODE_WRITE) {
3027                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3028
3029                 if (filter_hash)
3030                         orig_hash = &iter->ops->filter_hash;
3031                 else
3032                         orig_hash = &iter->ops->notrace_hash;
3033
3034                 mutex_lock(&ftrace_lock);
3035                 /*
3036                  * Remove the current set, update the hash and add
3037                  * them back.
3038                  */
3039                 ftrace_hash_rec_disable(iter->ops, filter_hash);
3040                 ret = ftrace_hash_move(orig_hash, iter->hash);
3041                 if (!ret) {
3042                         ftrace_hash_rec_enable(iter->ops, filter_hash);
3043                         if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
3044                             && ftrace_enabled)
3045                                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
3046                 }
3047                 mutex_unlock(&ftrace_lock);
3048         }
3049         free_ftrace_hash(iter->hash);
3050         kfree(iter);
3051
3052         mutex_unlock(&ftrace_regex_lock);
3053         return 0;
3054 }
3055
3056 static const struct file_operations ftrace_avail_fops = {
3057         .open = ftrace_avail_open,
3058         .read = seq_read,
3059         .llseek = seq_lseek,
3060         .release = seq_release_private,
3061 };
3062
3063 static const struct file_operations ftrace_enabled_fops = {
3064         .open = ftrace_enabled_open,
3065         .read = seq_read,
3066         .llseek = seq_lseek,
3067         .release = seq_release_private,
3068 };
3069
3070 static const struct file_operations ftrace_filter_fops = {
3071         .open = ftrace_filter_open,
3072         .read = seq_read,
3073         .write = ftrace_filter_write,
3074         .llseek = ftrace_regex_lseek,
3075         .release = ftrace_regex_release,
3076 };
3077
3078 static const struct file_operations ftrace_notrace_fops = {
3079         .open = ftrace_notrace_open,
3080         .read = seq_read,
3081         .write = ftrace_notrace_write,
3082         .llseek = ftrace_regex_lseek,
3083         .release = ftrace_regex_release,
3084 };
3085
3086 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3087
3088 static DEFINE_MUTEX(graph_lock);
3089
3090 int ftrace_graph_count;
3091 int ftrace_graph_filter_enabled;
3092 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3093
3094 static void *
3095 __g_next(struct seq_file *m, loff_t *pos)
3096 {
3097         if (*pos >= ftrace_graph_count)
3098                 return NULL;
3099         return &ftrace_graph_funcs[*pos];
3100 }
3101
3102 static void *
3103 g_next(struct seq_file *m, void *v, loff_t *pos)
3104 {
3105         (*pos)++;
3106         return __g_next(m, pos);
3107 }
3108
3109 static void *g_start(struct seq_file *m, loff_t *pos)
3110 {
3111         mutex_lock(&graph_lock);
3112
3113         /* Nothing filtered; tell g_show to print that all functions are enabled */
3114         if (!ftrace_graph_filter_enabled && !*pos)
3115                 return (void *)1;
3116
3117         return __g_next(m, pos);
3118 }
3119
3120 static void g_stop(struct seq_file *m, void *p)
3121 {
3122         mutex_unlock(&graph_lock);
3123 }
3124
3125 static int g_show(struct seq_file *m, void *v)
3126 {
3127         unsigned long *ptr = v;
3128
3129         if (!ptr)
3130                 return 0;
3131
3132         if (ptr == (unsigned long *)1) {
3133                 seq_printf(m, "#### all functions enabled ####\n");
3134                 return 0;
3135         }
3136
3137         seq_printf(m, "%ps\n", (void *)*ptr);
3138
3139         return 0;
3140 }
3141
3142 static const struct seq_operations ftrace_graph_seq_ops = {
3143         .start = g_start,
3144         .next = g_next,
3145         .stop = g_stop,
3146         .show = g_show,
3147 };
3148
3149 static int
3150 ftrace_graph_open(struct inode *inode, struct file *file)
3151 {
3152         int ret = 0;
3153
3154         if (unlikely(ftrace_disabled))
3155                 return -ENODEV;
3156
3157         mutex_lock(&graph_lock);
3158         if ((file->f_mode & FMODE_WRITE) &&
3159             (file->f_flags & O_TRUNC)) {
3160                 ftrace_graph_filter_enabled = 0;
3161                 ftrace_graph_count = 0;
3162                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3163         }
3164         mutex_unlock(&graph_lock);
3165
3166         if (file->f_mode & FMODE_READ)
3167                 ret = seq_open(file, &ftrace_graph_seq_ops);
3168
3169         return ret;
3170 }
3171
3172 static int
3173 ftrace_graph_release(struct inode *inode, struct file *file)
3174 {
3175         if (file->f_mode & FMODE_READ)
3176                 seq_release(inode, file);
3177         return 0;
3178 }
3179
3180 static int
3181 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3182 {
3183         struct dyn_ftrace *rec;
3184         struct ftrace_page *pg;
3185         int search_len;
3186         int fail = 1;
3187         int type, not;
3188         char *search;
3189         bool exists;
3190         int i;
3191
3192         /* decode regex */
3193         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3194         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3195                 return -EBUSY;
3196
3197         search_len = strlen(search);
3198
3199         mutex_lock(&ftrace_lock);
3200
3201         if (unlikely(ftrace_disabled)) {
3202                 mutex_unlock(&ftrace_lock);
3203                 return -ENODEV;
3204         }
3205
3206         do_for_each_ftrace_rec(pg, rec) {
3207
3208                 if (rec->flags & FTRACE_FL_FREE)
3209                         continue;
3210
3211                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3212                         /* if it is in the array */
3213                         exists = false;
3214                         for (i = 0; i < *idx; i++) {
3215                                 if (array[i] == rec->ip) {
3216                                         exists = true;
3217                                         break;
3218                                 }
3219                         }
3220
3221                         if (!not) {
3222                                 fail = 0;
3223                                 if (!exists) {
3224                                         array[(*idx)++] = rec->ip;
3225                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3226                                                 goto out;
3227                                 }
3228                         } else {
3229                                 if (exists) {
3230                                         array[i] = array[--(*idx)];
3231                                         array[*idx] = 0;
3232                                         fail = 0;
3233                                 }
3234                         }
3235                 }
3236         } while_for_each_ftrace_rec();
3237 out:
3238         mutex_unlock(&ftrace_lock);
3239
3240         if (fail)
3241                 return -EINVAL;
3242
3243         ftrace_graph_filter_enabled = 1;
3244         return 0;
3245 }
3246
3247 static ssize_t
3248 ftrace_graph_write(struct file *file, const char __user *ubuf,
3249                    size_t cnt, loff_t *ppos)
3250 {
3251         struct trace_parser parser;
3252         ssize_t read, ret;
3253
3254         if (!cnt)
3255                 return 0;
3256
3257         mutex_lock(&graph_lock);
3258
3259         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3260                 ret = -ENOMEM;
3261                 goto out_unlock;
3262         }
3263
3264         read = trace_get_user(&parser, ubuf, cnt, ppos);
3265
3266         if (read >= 0 && trace_parser_loaded((&parser))) {
3267                 parser.buffer[parser.idx] = 0;
3268
3269                 /* we allow only one expression at a time */
3270                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3271                                         parser.buffer);
3272                 if (ret)
3273                         goto out_free;
3274         }
3275
3276         ret = read;
3277
3278 out_free:
3279         trace_parser_put(&parser);
3280 out_unlock:
3281         mutex_unlock(&graph_lock);
3282
3283         return ret;
3284 }
3285
3286 static const struct file_operations ftrace_graph_fops = {
3287         .open           = ftrace_graph_open,
3288         .read           = seq_read,
3289         .write          = ftrace_graph_write,
3290         .release        = ftrace_graph_release,
3291         .llseek         = seq_lseek,
3292 };
3293 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3294
3295 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3296 {
3297
3298         trace_create_file("available_filter_functions", 0444,
3299                         d_tracer, NULL, &ftrace_avail_fops);
3300
3301         trace_create_file("enabled_functions", 0444,
3302                         d_tracer, NULL, &ftrace_enabled_fops);
3303
3304         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3305                         NULL, &ftrace_filter_fops);
3306
3307         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3308                                     NULL, &ftrace_notrace_fops);
3309
3310 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3311         trace_create_file("set_graph_function", 0444, d_tracer,
3312                                     NULL,
3313                                     &ftrace_graph_fops);
3314 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3315
3316         return 0;
3317 }
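
/*
 * Illustrative shell usage of the files created above (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/tracing/available_filter_functions
 *	# echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo '!sched_fork' >> /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 *	# cat /sys/kernel/debug/tracing/enabled_functions
 */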
3318
3319 static int ftrace_process_locs(struct module *mod,
3320                                unsigned long *start,
3321                                unsigned long *end)
3322 {
3323         unsigned long *p;
3324         unsigned long addr;
3325
3326         mutex_lock(&ftrace_lock);
3327         p = start;
3328         while (p < end) {
3329                 addr = ftrace_call_adjust(*p++);
3330                 /*
3331                  * Some architecture linkers will pad between
3332                  * the different mcount_loc sections of different
3333                  * object files to satisfy alignments.
3334                  * Skip any NULL pointers.
3335                  */
3336                 if (!addr)
3337                         continue;
3338                 ftrace_record_ip(addr);
3339         }
3340
3341         ftrace_update_code(mod);
3342         mutex_unlock(&ftrace_lock);
3343
3344         return 0;
3345 }
3346
3347 #ifdef CONFIG_MODULES
3348 void ftrace_release_mod(struct module *mod)
3349 {
3350         struct dyn_ftrace *rec;
3351         struct ftrace_page *pg;
3352
3353         mutex_lock(&ftrace_lock);
3354
3355         if (ftrace_disabled)
3356                 goto out_unlock;
3357
3358         do_for_each_ftrace_rec(pg, rec) {
3359                 if (within_module_core(rec->ip, mod)) {
3360                         /*
3361                          * rec->ip is changed in ftrace_free_rec(), so it
3362                          * should no longer be in this range if the record was freed.
3363                          */
3364                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3365                         ftrace_free_rec(rec);
3366                 }
3367         } while_for_each_ftrace_rec();
3368  out_unlock:
3369         mutex_unlock(&ftrace_lock);
3370 }
3371
3372 static void ftrace_init_module(struct module *mod,
3373                                unsigned long *start, unsigned long *end)
3374 {
3375         if (ftrace_disabled || start == end)
3376                 return;
3377         ftrace_process_locs(mod, start, end);
3378 }
3379
3380 static int ftrace_module_notify(struct notifier_block *self,
3381                                 unsigned long val, void *data)
3382 {
3383         struct module *mod = data;
3384
3385         switch (val) {
3386         case MODULE_STATE_COMING:
3387                 ftrace_init_module(mod, mod->ftrace_callsites,
3388                                    mod->ftrace_callsites +
3389                                    mod->num_ftrace_callsites);
3390                 break;
3391         case MODULE_STATE_GOING:
3392                 ftrace_release_mod(mod);
3393                 break;
3394         }
3395
3396         return 0;
3397 }
3398 #else
3399 static int ftrace_module_notify(struct notifier_block *self,
3400                                 unsigned long val, void *data)
3401 {
3402         return 0;
3403 }
3404 #endif /* CONFIG_MODULES */
3405
3406 struct notifier_block ftrace_module_nb = {
3407         .notifier_call = ftrace_module_notify,
3408         .priority = 0,
3409 };
3410
3411 extern unsigned long __start_mcount_loc[];
3412 extern unsigned long __stop_mcount_loc[];
3413
3414 void __init ftrace_init(void)
3415 {
3416         unsigned long count, addr, flags;
3417         int ret;
3418
3419         /* Keep the ftrace pointer to the stub */
3420         addr = (unsigned long)ftrace_stub;
3421
3422         local_irq_save(flags);
3423         ftrace_dyn_arch_init(&addr);
3424         local_irq_restore(flags);
3425
3426         /* ftrace_dyn_arch_init places the return code in addr */
3427         if (addr)
3428                 goto failed;
3429
3430         count = __stop_mcount_loc - __start_mcount_loc;
3431
3432         ret = ftrace_dyn_table_alloc(count);
3433         if (ret)
3434                 goto failed;
3435
3436         last_ftrace_enabled = ftrace_enabled = 1;
3437
3438         ret = ftrace_process_locs(NULL,
3439                                   __start_mcount_loc,
3440                                   __stop_mcount_loc);
3441
3442         ret = register_module_notifier(&ftrace_module_nb);
3443         if (ret)
3444                 pr_warning("Failed to register trace ftrace module notifier\n");
3445
3446         set_ftrace_early_filters();
3447
3448         return;
3449  failed:
3450         ftrace_disabled = 1;
3451 }
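
/*
 * Editor's sketch (not part of ftrace.c): __start_mcount_loc and
 * __stop_mcount_loc used above are symbols that bracket the table of mcount
 * call sites collected at build time.  The same start/stop-symbol pattern
 * works for any section whose name is a valid C identifier (the symbols are
 * provided by the linker or a linker-script entry); the section and struct
 * names below are purely illustrative.
 */
struct my_record {
	unsigned long ip;
};

#define DEFINE_MY_RECORD(name, addr)					\
	static struct my_record name					\
	__attribute__((used, section("my_records"))) = { .ip = (addr) }

extern struct my_record __start_my_records[];
extern struct my_record __stop_my_records[];

static unsigned long count_my_records(void)
{
	/* Pointer difference gives the element count, as in ftrace_init(). */
	return __stop_my_records - __start_my_records;
}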
3452
3453 #else
3454
3455 static struct ftrace_ops global_ops = {
3456         .func                   = ftrace_stub,
3457 };
3458
3459 static int __init ftrace_nodyn_init(void)
3460 {
3461         ftrace_enabled = 1;
3462         return 0;
3463 }
3464 device_initcall(ftrace_nodyn_init);
3465
3466 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3467 static inline void ftrace_startup_enable(int command) { }
3468 /* Keep as macros so we do not need to define the commands */
3469 # define ftrace_startup(ops, command)   do { } while (0)
3470 # define ftrace_shutdown(ops, command)  do { } while (0)
3471 # define ftrace_startup_sysctl()        do { } while (0)
3472 # define ftrace_shutdown_sysctl()       do { } while (0)
3473
3474 static inline int
3475 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3476 {
3477         return 1;
3478 }
3479
3480 #endif /* CONFIG_DYNAMIC_FTRACE */
3481
3482 static void
3483 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3484 {
3485         struct ftrace_ops *op;
3486
3487         /*
3488          * Some of the ops may be dynamically allocated; they
3489          * must be freed only after a synchronize_sched().
3490          */
3491         preempt_disable_notrace();
3492         op = rcu_dereference_raw(ftrace_ops_list);
3493         while (op != &ftrace_list_end) {
3494                 if (ftrace_ops_test(op, ip))
3495                         op->func(ip, parent_ip);
3496                 op = rcu_dereference_raw(op->next);
3497         }
3498         preempt_enable_notrace();
3499 }
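
/*
 * Editor's sketch (not part of ftrace.c): because ftrace_ops_list_func()
 * above walks the list under nothing stronger than preempt_disable_notrace(),
 * a dynamically allocated ftrace_ops must not be freed until every walker
 * that might still hold a pointer to it has finished.  The hypothetical
 * helper below shows the required ordering.
 */
static void free_dynamic_ops(struct ftrace_ops *ops)
{
	/* Unlink it first so new walkers can no longer find it. */
	unregister_ftrace_function(ops);

	/* Wait for all preempt-disabled (RCU-sched) sections to finish. */
	synchronize_sched();

	kfree(ops);
}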
3500
3501 static void clear_ftrace_swapper(void)
3502 {
3503         struct task_struct *p;
3504         int cpu;
3505
3506         get_online_cpus();
3507         for_each_online_cpu(cpu) {
3508                 p = idle_task(cpu);
3509                 clear_tsk_trace_trace(p);
3510         }
3511         put_online_cpus();
3512 }
3513
3514 static void set_ftrace_swapper(void)
3515 {
3516         struct task_struct *p;
3517         int cpu;
3518
3519         get_online_cpus();
3520         for_each_online_cpu(cpu) {
3521                 p = idle_task(cpu);
3522                 set_tsk_trace_trace(p);
3523         }
3524         put_online_cpus();
3525 }
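
/*
 * Editor's sketch (not part of ftrace.c): the hotplug-safe walk used by
 * clear/set_ftrace_swapper() above -- hold the CPU hotplug read lock so the
 * set of online CPUs cannot change while each idle task is visited.  The
 * helper name and callback are hypothetical.
 */
static void visit_idle_tasks(void (*visit)(struct task_struct *p))
{
	int cpu;

	get_online_cpus();		/* block CPU hotplug during the walk */
	for_each_online_cpu(cpu)
		visit(idle_task(cpu));
	put_online_cpus();
}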
3526
3527 static void clear_ftrace_pid(struct pid *pid)
3528 {
3529         struct task_struct *p;
3530
3531         rcu_read_lock();
3532         do_each_pid_task(pid, PIDTYPE_PID, p) {
3533                 clear_tsk_trace_trace(p);
3534         } while_each_pid_task(pid, PIDTYPE_PID, p);
3535         rcu_read_unlock();
3536
3537         put_pid(pid);
3538 }
3539
3540 static void set_ftrace_pid(struct pid *pid)
3541 {
3542         struct task_struct *p;
3543
3544         rcu_read_lock();
3545         do_each_pid_task(pid, PIDTYPE_PID, p) {
3546                 set_tsk_trace_trace(p);
3547         } while_each_pid_task(pid, PIDTYPE_PID, p);
3548         rcu_read_unlock();
3549 }
3550
3551 static void clear_ftrace_pid_task(struct pid *pid)
3552 {
3553         if (pid == ftrace_swapper_pid)
3554                 clear_ftrace_swapper();
3555         else
3556                 clear_ftrace_pid(pid);
3557 }
3558
3559 static void set_ftrace_pid_task(struct pid *pid)
3560 {
3561         if (pid == ftrace_swapper_pid)
3562                 set_ftrace_swapper();
3563         else
3564                 set_ftrace_pid(pid);
3565 }
3566
3567 static int ftrace_pid_add(int p)
3568 {
3569         struct pid *pid;
3570         struct ftrace_pid *fpid;
3571         int ret = -EINVAL;
3572
3573         mutex_lock(&ftrace_lock);
3574
3575         if (!p)
3576                 pid = ftrace_swapper_pid;
3577         else
3578                 pid = find_get_pid(p);
3579
3580         if (!pid)
3581                 goto out;
3582
3583         ret = 0;
3584
3585         list_for_each_entry(fpid, &ftrace_pids, list)
3586                 if (fpid->pid == pid)
3587                         goto out_put;
3588
3589         ret = -ENOMEM;
3590
3591         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3592         if (!fpid)
3593                 goto out_put;
3594
3595         list_add(&fpid->list, &ftrace_pids);
3596         fpid->pid = pid;
3597
3598         set_ftrace_pid_task(pid);
3599
3600         ftrace_update_pid_func();
3601         ftrace_startup_enable(0);
3602
3603         mutex_unlock(&ftrace_lock);
3604         return 0;
3605
3606 out_put:
3607         if (pid != ftrace_swapper_pid)
3608                 put_pid(pid);
3609
3610 out:
3611         mutex_unlock(&ftrace_lock);
3612         return ret;
3613 }
3614
3615 static void ftrace_pid_reset(void)
3616 {
3617         struct ftrace_pid *fpid, *safe;
3618
3619         mutex_lock(&ftrace_lock);
3620         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3621                 struct pid *pid = fpid->pid;
3622
3623                 clear_ftrace_pid_task(pid);
3624
3625                 list_del(&fpid->list);
3626                 kfree(fpid);
3627         }
3628
3629         ftrace_update_pid_func();
3630         ftrace_startup_enable(0);
3631
3632         mutex_unlock(&ftrace_lock);
3633 }
3634
3635 static void *fpid_start(struct seq_file *m, loff_t *pos)
3636 {
3637         mutex_lock(&ftrace_lock);
3638
3639         if (list_empty(&ftrace_pids) && (!*pos))
3640                 return (void *) 1;
3641
3642         return seq_list_start(&ftrace_pids, *pos);
3643 }
3644
3645 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3646 {
3647         if (v == (void *)1)
3648                 return NULL;
3649
3650         return seq_list_next(v, &ftrace_pids, pos);
3651 }
3652
3653 static void fpid_stop(struct seq_file *m, void *p)
3654 {
3655         mutex_unlock(&ftrace_lock);
3656 }
3657
3658 static int fpid_show(struct seq_file *m, void *v)
3659 {
3660         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3661
3662         if (v == (void *)1) {
3663                 seq_printf(m, "no pid\n");
3664                 return 0;
3665         }
3666
3667         if (fpid->pid == ftrace_swapper_pid)
3668                 seq_printf(m, "swapper tasks\n");
3669         else
3670                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3671
3672         return 0;
3673 }
3674
3675 static const struct seq_operations ftrace_pid_sops = {
3676         .start = fpid_start,
3677         .next = fpid_next,
3678         .stop = fpid_stop,
3679         .show = fpid_show,
3680 };
3681
3682 static int
3683 ftrace_pid_open(struct inode *inode, struct file *file)
3684 {
3685         int ret = 0;
3686
3687         if ((file->f_mode & FMODE_WRITE) &&
3688             (file->f_flags & O_TRUNC))
3689                 ftrace_pid_reset();
3690
3691         if (file->f_mode & FMODE_READ)
3692                 ret = seq_open(file, &ftrace_pid_sops);
3693
3694         return ret;
3695 }
3696
3697 static ssize_t
3698 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3699                    size_t cnt, loff_t *ppos)
3700 {
3701         char buf[64], *tmp;
3702         long val;
3703         int ret;
3704
3705         if (cnt >= sizeof(buf))
3706                 return -EINVAL;
3707
3708         if (copy_from_user(&buf, ubuf, cnt))
3709                 return -EFAULT;
3710
3711         buf[cnt] = 0;
3712
3713         /*
3714          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3715          * to clear the filter quietly.
3716          */
3717         tmp = strstrip(buf);
3718         if (strlen(tmp) == 0)
3719                 return 1;
3720
3721         ret = strict_strtol(tmp, 10, &val);
3722         if (ret < 0)
3723                 return ret;
3724
3725         ret = ftrace_pid_add(val);
3726
3727         return ret ? ret : cnt;
3728 }
3729
3730 static int
3731 ftrace_pid_release(struct inode *inode, struct file *file)
3732 {
3733         if (file->f_mode & FMODE_READ)
3734                 seq_release(inode, file);
3735
3736         return 0;
3737 }
3738
3739 static const struct file_operations ftrace_pid_fops = {
3740         .open           = ftrace_pid_open,
3741         .write          = ftrace_pid_write,
3742         .read           = seq_read,
3743         .llseek         = seq_lseek,
3744         .release        = ftrace_pid_release,
3745 };
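
/*
 * Editor's sketch (not part of ftrace.c): a minimal user-space program that
 * exercises the set_ftrace_pid file created below.  It assumes debugfs is
 * mounted at /sys/kernel/debug and uses an invented PID; writing a PID
 * restricts function tracing to that task, while opening the file with
 * O_TRUNC ("echo > set_ftrace_pid") clears the filter, as described in
 * ftrace_pid_write() above.
 */
#if 0	/* user-space example, not kernel code */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/set_ftrace_pid", "w");

	if (!f) {
		perror("set_ftrace_pid");
		return 1;
	}
	fprintf(f, "%d\n", 1234);	/* hypothetical PID to trace */
	fclose(f);
	return 0;
}
#endif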
3746
3747 static __init int ftrace_init_debugfs(void)
3748 {
3749         struct dentry *d_tracer;
3750
3751         d_tracer = tracing_init_dentry();
3752         if (!d_tracer)
3753                 return 0;
3754
3755         ftrace_init_dyn_debugfs(d_tracer);
3756
3757         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3758                             NULL, &ftrace_pid_fops);
3759
3760         ftrace_profile_debugfs(d_tracer);
3761
3762         return 0;
3763 }
3764 fs_initcall(ftrace_init_debugfs);
3765
3766 /**
3767  * ftrace_kill - kill ftrace
3768  *
3769  * This function should be used by panic code. It stops ftrace
3770  * but in a not-so-nice way: it simply disables tracing without
3771  * taking any locks, so it may be called from atomic context.
3772  */
3773 void ftrace_kill(void)
3774 {
3775         ftrace_disabled = 1;
3776         ftrace_enabled = 0;
3777         clear_ftrace_function();
3778 }
3779
3780 /**
3781  * register_ftrace_function - register a function for profiling
3782  * @ops: ops structure that holds the function for profiling.
3783  *
3784  * Register a function to be called by all functions in the
3785  * kernel.
3786  *
3787  * Note: @ops->func and all the functions it calls must be labeled
3788  *       with "notrace", otherwise it will go into a
3789  *       recursive loop.
3790  */
3791 int register_ftrace_function(struct ftrace_ops *ops)
3792 {
3793         int ret = -1;
3794
3795         mutex_lock(&ftrace_lock);
3796
3797         if (unlikely(ftrace_disabled))
3798                 goto out_unlock;
3799
3800         ret = __register_ftrace_function(ops);
3801         if (!ret)
3802                 ftrace_startup(ops, 0);
3803
3804
3805  out_unlock:
3806         mutex_unlock(&ftrace_lock);
3807         return ret;
3808 }
3809 EXPORT_SYMBOL_GPL(register_ftrace_function);
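
/*
 * Editor's sketch (not part of ftrace.c): minimal use of the API documented
 * above, as a separate GPL module would do it (hooked up with module_init()
 * and module_exit()).  The callback and ops names are hypothetical; per the
 * note above, the callback and everything it calls must be notrace or the
 * tracer will recurse.
 */
static void notrace my_trace_callback(unsigned long ip, unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip the address of its call site */
}

static struct ftrace_ops my_trace_ops = {
	.func	= my_trace_callback,
};

static int __init my_trace_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}

static void __exit my_trace_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}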
3810
3811 /**
3812  * unregister_ftrace_function - unregister a function for profiling.
3813  * @ops: ops structure that holds the function to unregister
3814  *
3815  * Unregister a function that was added to be called by ftrace profiling.
3816  */
3817 int unregister_ftrace_function(struct ftrace_ops *ops)
3818 {
3819         int ret;
3820
3821         mutex_lock(&ftrace_lock);
3822         ret = __unregister_ftrace_function(ops);
3823         if (!ret)
3824                 ftrace_shutdown(ops, 0);
3825         mutex_unlock(&ftrace_lock);
3826
3827         return ret;
3828 }
3829 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3830
3831 int
3832 ftrace_enable_sysctl(struct ctl_table *table, int write,
3833                      void __user *buffer, size_t *lenp,
3834                      loff_t *ppos)
3835 {
3836         int ret = -ENODEV;
3837
3838         mutex_lock(&ftrace_lock);
3839
3840         if (unlikely(ftrace_disabled))
3841                 goto out;
3842
3843         ret = proc_dointvec(table, write, buffer, lenp, ppos);
3844
3845         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3846                 goto out;
3847
3848         last_ftrace_enabled = !!ftrace_enabled;
3849
3850         if (ftrace_enabled) {
3851
3852                 ftrace_startup_sysctl();
3853
3854                 /* we are starting ftrace again */
3855                 if (ftrace_ops_list != &ftrace_list_end) {
3856                         if (ftrace_ops_list->next == &ftrace_list_end)
3857                                 ftrace_trace_function = ftrace_ops_list->func;
3858                         else
3859                                 ftrace_trace_function = ftrace_ops_list_func;
3860                 }
3861
3862         } else {
3863                 /* stopping ftrace calls (just send to ftrace_stub) */
3864                 ftrace_trace_function = ftrace_stub;
3865
3866                 ftrace_shutdown_sysctl();
3867         }
3868
3869  out:
3870         mutex_unlock(&ftrace_lock);
3871         return ret;
3872 }
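
/*
 * Editor's sketch (not part of ftrace.c): the general shape of a handler
 * like ftrace_enable_sysctl() above -- let proc_dointvec() parse the value
 * (the ctl_table's .data would point at the knob), then react only on a
 * successful write that actually changed it.  The knob and handler names
 * are hypothetical.
 */
static int my_knob;
static int my_last_knob;

static int my_knob_sysctl(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || my_last_knob == my_knob)
		return ret;

	my_last_knob = my_knob;
	/* enable or disable the feature here */
	return 0;
}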
3873
3874 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3875
3876 static int ftrace_graph_active;
3877 static struct notifier_block ftrace_suspend_notifier;
3878
3879 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3880 {
3881         return 0;
3882 }
3883
3884 /* The callbacks that hook a function */
3885 trace_func_graph_ret_t ftrace_graph_return =
3886                         (trace_func_graph_ret_t)ftrace_stub;
3887 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3888
3889 /* Try to assign a return stack array to up to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3890 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3891 {
3892         int i;
3893         int ret = 0;
3894         unsigned long flags;
3895         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3896         struct task_struct *g, *t;
3897
3898         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3899                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3900                                         * sizeof(struct ftrace_ret_stack),
3901                                         GFP_KERNEL);
3902                 if (!ret_stack_list[i]) {
3903                         start = 0;
3904                         end = i;
3905                         ret = -ENOMEM;
3906                         goto free;
3907                 }
3908         }
3909
3910         read_lock_irqsave(&tasklist_lock, flags);
3911         do_each_thread(g, t) {
3912                 if (start == end) {
3913                         ret = -EAGAIN;
3914                         goto unlock;
3915                 }
3916
3917                 if (t->ret_stack == NULL) {
3918                         atomic_set(&t->tracing_graph_pause, 0);
3919                         atomic_set(&t->trace_overrun, 0);
3920                         t->curr_ret_stack = -1;
3921                         /* Make sure the tasks see the -1 first: */
3922                         smp_wmb();
3923                         t->ret_stack = ret_stack_list[start++];
3924                 }
3925         } while_each_thread(g, t);
3926
3927 unlock:
3928         read_unlock_irqrestore(&tasklist_lock, flags);
3929 free:
3930         for (i = start; i < end; i++)
3931                 kfree(ret_stack_list[i]);
3932         return ret;
3933 }
3934
3935 static void
3936 ftrace_graph_probe_sched_switch(void *ignore,
3937                         struct task_struct *prev, struct task_struct *next)
3938 {
3939         unsigned long long timestamp;
3940         int index;
3941
3942         /*
3943          * Does the user want to count the time a function was asleep?
3944          * If so, do not update the time stamps.
3945          */
3946         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3947                 return;
3948
3949         timestamp = trace_clock_local();
3950
3951         prev->ftrace_timestamp = timestamp;
3952
3953         /* only process tasks that we timestamped */
3954         if (!next->ftrace_timestamp)
3955                 return;
3956
3957         /*
3958          * Update all the counters in next to make up for the
3959          * time next was sleeping.
3960          */
3961         timestamp -= next->ftrace_timestamp;
3962
3963         for (index = next->curr_ret_stack; index >= 0; index--)
3964                 next->ret_stack[index].calltime += timestamp;
3965 }
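
/*
 * Editor's illustration with invented numbers (not part of ftrace.c): if a
 * frame was entered at calltime = 1000, the task then slept 400 time units
 * across a context switch, and the function returns at rettime = 1600, the
 * adjustment above bumps calltime to 1000 + 400 = 1400, so the reported
 * duration rettime - calltime = 200 covers only the time spent executing
 * rather than 600 including the sleep.
 */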
3966
3967 /* Allocate a return stack for each task */
3968 static int start_graph_tracing(void)
3969 {
3970         struct ftrace_ret_stack **ret_stack_list;
3971         int ret, cpu;
3972
3973         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3974                                 sizeof(struct ftrace_ret_stack *),
3975                                 GFP_KERNEL);
3976
3977         if (!ret_stack_list)
3978                 return -ENOMEM;
3979
3980         /* The idle task's ret_stack is never freed, so allocate it only once */
3981         for_each_online_cpu(cpu) {
3982                 if (!idle_task(cpu)->ret_stack)
3983                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3984         }
3985
3986         do {
3987                 ret = alloc_retstack_tasklist(ret_stack_list);
3988         } while (ret == -EAGAIN);
3989
3990         if (!ret) {
3991                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3992                 if (ret)
3993                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3994                                 " probe to kernel_sched_switch\n");
3995         }
3996
3997         kfree(ret_stack_list);
3998         return ret;
3999 }
4000
4001 /*
4002  * Hibernation protection.
4003  * The state of the current task is too unstable during
4004  * suspend/restore to disk, so we protect against that.
4005  */
4006 static int
4007 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4008                                                         void *unused)
4009 {
4010         switch (state) {
4011         case PM_HIBERNATION_PREPARE:
4012                 pause_graph_tracing();
4013                 break;
4014
4015         case PM_POST_HIBERNATION:
4016                 unpause_graph_tracing();
4017                 break;
4018         }
4019         return NOTIFY_DONE;
4020 }
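
/*
 * Editor's sketch (not part of ftrace.c): the same per-task pause/unpause
 * pair used by the hibernation notifier above can bracket any region where
 * graph-tracing the current task would be unsafe or unwanted; the calls
 * nest via a counter.  The function name is hypothetical.
 */
static void my_fragile_section(void)
{
	pause_graph_tracing();
	/* ... code that must not be graph traced ... */
	unpause_graph_tracing();
}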
4021
4022 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4023                         trace_func_graph_ent_t entryfunc)
4024 {
4025         int ret = 0;
4026
4027         mutex_lock(&ftrace_lock);
4028
4029         /* we currently allow only one tracer registered at a time */
4030         if (ftrace_graph_active) {
4031                 ret = -EBUSY;
4032                 goto out;
4033         }
4034
4035         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4036         register_pm_notifier(&ftrace_suspend_notifier);
4037
4038         ftrace_graph_active++;
4039         ret = start_graph_tracing();
4040         if (ret) {
4041                 ftrace_graph_active--;
4042                 goto out;
4043         }
4044
4045         ftrace_graph_return = retfunc;
4046         ftrace_graph_entry = entryfunc;
4047
4048         ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4049
4050 out:
4051         mutex_unlock(&ftrace_lock);
4052         return ret;
4053 }
4054
4055 void unregister_ftrace_graph(void)
4056 {
4057         mutex_lock(&ftrace_lock);
4058
4059         if (unlikely(!ftrace_graph_active))
4060                 goto out;
4061
4062         ftrace_graph_active--;
4063         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4064         ftrace_graph_entry = ftrace_graph_entry_stub;
4065         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4066         unregister_pm_notifier(&ftrace_suspend_notifier);
4067         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4068
4069  out:
4070         mutex_unlock(&ftrace_lock);
4071 }
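
/*
 * Editor's sketch (not part of ftrace.c): minimal use of the graph API
 * above.  The callback names are hypothetical; the entry handler returns
 * nonzero to trace this call (0 skips it), and both callbacks run in a very
 * restricted context, so they should do as little as possible.
 */
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* trace->func is the function being entered, trace->depth its depth */
	return 1;
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the time spent in the function */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}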
4072
4073 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4074
4075 static void
4076 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4077 {
4078         atomic_set(&t->tracing_graph_pause, 0);
4079         atomic_set(&t->trace_overrun, 0);
4080         t->ftrace_timestamp = 0;
4081         /* make curr_ret_stack visible before we add the ret_stack */
4082         smp_wmb();
4083         t->ret_stack = ret_stack;
4084 }
4085
4086 /*
4087  * Allocate a return stack for the idle task. This may be the first
4088  * time through at boot, or a CPU may be coming online via hotplug.
4089  */
4090 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4091 {
4092         t->curr_ret_stack = -1;
4093         /*
4094          * The idle task has no parent; it either has its own
4095          * stack or no stack at all.
4096          */
4097         if (t->ret_stack)
4098                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4099
4100         if (ftrace_graph_active) {
4101                 struct ftrace_ret_stack *ret_stack;
4102
4103                 ret_stack = per_cpu(idle_ret_stack, cpu);
4104                 if (!ret_stack) {
4105                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4106                                             * sizeof(struct ftrace_ret_stack),
4107                                             GFP_KERNEL);
4108                         if (!ret_stack)
4109                                 return;
4110                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4111                 }
4112                 graph_init_task(t, ret_stack);
4113         }
4114 }
4115
4116 /* Allocate a return stack for newly created task */
4117 void ftrace_graph_init_task(struct task_struct *t)
4118 {
4119         /* Make sure we do not use the parent ret_stack */
4120         t->ret_stack = NULL;
4121         t->curr_ret_stack = -1;
4122
4123         if (ftrace_graph_active) {
4124                 struct ftrace_ret_stack *ret_stack;
4125
4126                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4127                                 * sizeof(struct ftrace_ret_stack),
4128                                 GFP_KERNEL);
4129                 if (!ret_stack)
4130                         return;
4131                 graph_init_task(t, ret_stack);
4132         }
4133 }
4134
4135 void ftrace_graph_exit_task(struct task_struct *t)
4136 {
4137         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4138
4139         t->ret_stack = NULL;
4140         /* NULL must become visible to IRQs before we free it: */
4141         barrier();
4142
4143         kfree(ret_stack);
4144 }
4145
4146 void ftrace_graph_stop(void)
4147 {
4148         ftrace_stop();
4149 }
4150 #endif