ftrace: Fix unregister ftrace_ops accounting
kernel/trace/ftrace.c (linux-2.6.git)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/setup.h>
36
37 #include "trace_output.h"
38 #include "trace_stat.h"
39
40 #define FTRACE_WARN_ON(cond)                    \
41         ({                                      \
42                 int ___r = cond;                \
43                 if (WARN_ON(___r))              \
44                         ftrace_kill();          \
45                 ___r;                           \
46         })
47
48 #define FTRACE_WARN_ON_ONCE(cond)               \
49         ({                                      \
50                 int ___r = cond;                \
51                 if (WARN_ON_ONCE(___r))         \
52                         ftrace_kill();          \
53                 ___r;                           \
54         })
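
/*
 * Added note (not in the original file): the macros above are GCC statement
 * expressions, so the tested condition is handed back to the caller and can
 * still drive control flow, e.g. (hypothetical use):
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return;
 *
 * which warns, shuts ftrace down via ftrace_kill(), and still takes the
 * early return.
 */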
55
56 /* hash bits for specific function selection */
57 #define FTRACE_HASH_BITS 7
58 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
59 #define FTRACE_HASH_DEFAULT_BITS 10
60 #define FTRACE_HASH_MAX_BITS 12
61
62 /* ftrace_enabled is a method to turn ftrace on or off */
63 int ftrace_enabled __read_mostly;
64 static int last_ftrace_enabled;
65
66 /* Quick disabling of function tracer. */
67 int function_trace_stop;
68
69 /* List for set_ftrace_pid's pids. */
70 LIST_HEAD(ftrace_pids);
71 struct ftrace_pid {
72         struct list_head list;
73         struct pid *pid;
74 };
75
76 /*
77  * ftrace_disabled is set when an anomaly is discovered.
78  * ftrace_disabled is much stronger than ftrace_enabled.
79  */
80 static int ftrace_disabled __read_mostly;
81
82 static DEFINE_MUTEX(ftrace_lock);
83
84 static struct ftrace_ops ftrace_list_end __read_mostly = {
85         .func           = ftrace_stub,
86 };
87
88 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
89 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
90 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
91 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
92 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
93 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
94 static struct ftrace_ops global_ops;
95
96 static void
97 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
98
99 /*
100  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
101  * can use rcu_dereference_raw() is that elements removed from this list
102  * are simply leaked, so there is no need to interact with a grace-period
103  * mechanism.  The rcu_dereference_raw() calls are needed to handle
104  * concurrent insertions into the ftrace_global_list.
105  *
106  * Silly Alpha and silly pointer-speculation compiler optimizations!
107  */
108 static void ftrace_global_list_func(unsigned long ip,
109                                     unsigned long parent_ip)
110 {
111         struct ftrace_ops *op;
112
113         if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
114                 return;
115
116         trace_recursion_set(TRACE_GLOBAL_BIT);
117         op = rcu_dereference_raw(ftrace_global_list); /*see above*/
118         while (op != &ftrace_list_end) {
119                 op->func(ip, parent_ip);
120                 op = rcu_dereference_raw(op->next); /*see above*/
121         }
122         trace_recursion_clear(TRACE_GLOBAL_BIT);
123 }
124
125 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
126 {
127         if (!test_tsk_trace_trace(current))
128                 return;
129
130         ftrace_pid_function(ip, parent_ip);
131 }
132
133 static void set_ftrace_pid_function(ftrace_func_t func)
134 {
135         /* do not set ftrace_pid_function to itself! */
136         if (func != ftrace_pid_func)
137                 ftrace_pid_function = func;
138 }
139
140 /**
141  * clear_ftrace_function - reset the ftrace function
142  *
143  * This NULLs the ftrace function and in essence stops
144  * tracing.  There may be a lag before all CPUs see the change.
145  */
146 void clear_ftrace_function(void)
147 {
148         ftrace_trace_function = ftrace_stub;
149         __ftrace_trace_function = ftrace_stub;
150         __ftrace_trace_function_delay = ftrace_stub;
151         ftrace_pid_function = ftrace_stub;
152 }
153
154 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
155 /*
156  * For those archs that do not test function_trace_stop in their
157  * mcount call site, we need to do it from C.
158  */
159 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
160 {
161         if (function_trace_stop)
162                 return;
163
164         __ftrace_trace_function(ip, parent_ip);
165 }
166 #endif
167
168 static void update_global_ops(void)
169 {
170         ftrace_func_t func;
171
172         /*
173          * If there's only one function registered, then call that
174          * function directly. Otherwise, we need to iterate over the
175          * registered callers.
176          */
177         if (ftrace_global_list == &ftrace_list_end ||
178             ftrace_global_list->next == &ftrace_list_end)
179                 func = ftrace_global_list->func;
180         else
181                 func = ftrace_global_list_func;
182
183         /* If we filter on pids, update to use the pid function */
184         if (!list_empty(&ftrace_pids)) {
185                 set_ftrace_pid_function(func);
186                 func = ftrace_pid_func;
187         }
188
189         global_ops.func = func;
190 }
191
192 static void update_ftrace_function(void)
193 {
194         ftrace_func_t func;
195
196         update_global_ops();
197
198         /*
199          * If there is at most one ops on the list and it is
200          * not dynamic, then have the mcount trampoline call
201          * that function directly.
202          */
203         if (ftrace_ops_list == &ftrace_list_end ||
204             (ftrace_ops_list->next == &ftrace_list_end &&
205              !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
206                 func = ftrace_ops_list->func;
207         else
208                 func = ftrace_ops_list_func;
209
210 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
211         ftrace_trace_function = func;
212 #else
213 #ifdef CONFIG_DYNAMIC_FTRACE
214         /* do not update till all functions have been modified */
215         __ftrace_trace_function_delay = func;
216 #else
217         __ftrace_trace_function = func;
218 #endif
219         ftrace_trace_function = ftrace_test_stop_func;
220 #endif
221 }
222
223 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
224 {
225         ops->next = *list;
226         /*
227          * We are entering ops into the list but another
228          * CPU might be walking that list. We need to make sure
229          * the ops->next pointer is valid before another CPU sees
230          * the ops pointer included in the list.
231          */
232         rcu_assign_pointer(*list, ops);
233 }
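
/*
 * Added note (not in the original file): the rcu_assign_pointer() above pairs
 * with the rcu_dereference_raw() calls used by the list walkers such as
 * ftrace_global_list_func(), which may run on another CPU without holding
 * ftrace_lock.
 */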
234
235 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
236 {
237         struct ftrace_ops **p;
238
239         /*
240          * If we are removing the last function, then simply point
241          * to the ftrace_stub.
242          */
243         if (*list == ops && ops->next == &ftrace_list_end) {
244                 *list = &ftrace_list_end;
245                 return 0;
246         }
247
248         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
249                 if (*p == ops)
250                         break;
251
252         if (*p != ops)
253                 return -1;
254
255         *p = (*p)->next;
256         return 0;
257 }
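
/*
 * Added note (not in the original file): the removal loop above walks the
 * list with a pointer to the link field itself, so once the matching entry
 * is found it can be unlinked with a single assignment (*p = (*p)->next)
 * and no separate handling of the head pointer.
 */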
258
259 static int __register_ftrace_function(struct ftrace_ops *ops)
260 {
261         if (ftrace_disabled)
262                 return -ENODEV;
263
264         if (FTRACE_WARN_ON(ops == &global_ops))
265                 return -EINVAL;
266
267         if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
268                 return -EBUSY;
269
270         if (!core_kernel_data((unsigned long)ops))
271                 ops->flags |= FTRACE_OPS_FL_DYNAMIC;
272
273         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
274                 int first = ftrace_global_list == &ftrace_list_end;
275                 add_ftrace_ops(&ftrace_global_list, ops);
276                 ops->flags |= FTRACE_OPS_FL_ENABLED;
277                 if (first)
278                         add_ftrace_ops(&ftrace_ops_list, &global_ops);
279         } else
280                 add_ftrace_ops(&ftrace_ops_list, ops);
281
282         if (ftrace_enabled)
283                 update_ftrace_function();
284
285         return 0;
286 }
287
288 static int __unregister_ftrace_function(struct ftrace_ops *ops)
289 {
290         int ret;
291
292         if (ftrace_disabled)
293                 return -ENODEV;
294
295         if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
296                 return -EBUSY;
297
298         if (FTRACE_WARN_ON(ops == &global_ops))
299                 return -EINVAL;
300
301         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
302                 ret = remove_ftrace_ops(&ftrace_global_list, ops);
303                 if (!ret && ftrace_global_list == &ftrace_list_end)
304                         ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
305                 if (!ret)
306                         ops->flags &= ~FTRACE_OPS_FL_ENABLED;
307         } else
308                 ret = remove_ftrace_ops(&ftrace_ops_list, ops);
309
310         if (ret < 0)
311                 return ret;
312
313         if (ftrace_enabled)
314                 update_ftrace_function();
315
316         /*
317          * Dynamic ops may be freed, we must make sure that all
318          * callers are done before leaving this function.
319          */
320         if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
321                 synchronize_sched();
322
323         return 0;
324 }
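
/*
 * Illustrative sketch (not part of the original file): a typical user goes
 * through the public register_ftrace_function()/unregister_ftrace_function()
 * wrappers, which call into the two helpers above under ftrace_lock.  The
 * names my_trace_func, my_ops, my_start and my_stop are hypothetical.
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* Called on every traced function entry; keep this path cheap. */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
};

static int my_start(void)
{
	/* Adds my_ops to ftrace_ops_list and patches the call sites. */
	return register_ftrace_function(&my_ops);
}

static void my_stop(void)
{
	/* Removes my_ops; dynamic ops also wait via synchronize_sched(). */
	unregister_ftrace_function(&my_ops);
}
#endif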
325
326 static void ftrace_update_pid_func(void)
327 {
328         /* Only do something if we are tracing something */
329         if (ftrace_trace_function == ftrace_stub)
330                 return;
331
332         update_ftrace_function();
333 }
334
335 #ifdef CONFIG_FUNCTION_PROFILER
336 struct ftrace_profile {
337         struct hlist_node               node;
338         unsigned long                   ip;
339         unsigned long                   counter;
340 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
341         unsigned long long              time;
342         unsigned long long              time_squared;
343 #endif
344 };
345
346 struct ftrace_profile_page {
347         struct ftrace_profile_page      *next;
348         unsigned long                   index;
349         struct ftrace_profile           records[];
350 };
351
352 struct ftrace_profile_stat {
353         atomic_t                        disabled;
354         struct hlist_head               *hash;
355         struct ftrace_profile_page      *pages;
356         struct ftrace_profile_page      *start;
357         struct tracer_stat              stat;
358 };
359
360 #define PROFILE_RECORDS_SIZE                                            \
361         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
362
363 #define PROFILES_PER_PAGE                                       \
364         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
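
/*
 * Worked example (added comment, not in the original file; assumes 4K pages
 * and a 64-bit build): records[] starts 16 bytes into the page, so
 * PROFILE_RECORDS_SIZE is 4096 - 16 = 4080.  With the graph-tracer fields a
 * struct ftrace_profile is 48 bytes, giving 85 profiles per page; without
 * them it is 32 bytes, giving 127.
 */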
365
366 static int ftrace_profile_bits __read_mostly;
367 static int ftrace_profile_enabled __read_mostly;
368
369 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
370 static DEFINE_MUTEX(ftrace_profile_lock);
371
372 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
373
374 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
375
376 static void *
377 function_stat_next(void *v, int idx)
378 {
379         struct ftrace_profile *rec = v;
380         struct ftrace_profile_page *pg;
381
382         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
383
384  again:
385         if (idx != 0)
386                 rec++;
387
388         if ((void *)rec >= (void *)&pg->records[pg->index]) {
389                 pg = pg->next;
390                 if (!pg)
391                         return NULL;
392                 rec = &pg->records[0];
393                 if (!rec->counter)
394                         goto again;
395         }
396
397         return rec;
398 }
399
400 static void *function_stat_start(struct tracer_stat *trace)
401 {
402         struct ftrace_profile_stat *stat =
403                 container_of(trace, struct ftrace_profile_stat, stat);
404
405         if (!stat || !stat->start)
406                 return NULL;
407
408         return function_stat_next(&stat->start->records[0], 0);
409 }
410
411 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
412 /* function graph compares on total time */
413 static int function_stat_cmp(void *p1, void *p2)
414 {
415         struct ftrace_profile *a = p1;
416         struct ftrace_profile *b = p2;
417
418         if (a->time < b->time)
419                 return -1;
420         if (a->time > b->time)
421                 return 1;
422         else
423                 return 0;
424 }
425 #else
426 /* without function graph, compare against hit counts */
427 static int function_stat_cmp(void *p1, void *p2)
428 {
429         struct ftrace_profile *a = p1;
430         struct ftrace_profile *b = p2;
431
432         if (a->counter < b->counter)
433                 return -1;
434         if (a->counter > b->counter)
435                 return 1;
436         else
437                 return 0;
438 }
439 #endif
440
441 static int function_stat_headers(struct seq_file *m)
442 {
443 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
444         seq_printf(m, "  Function                               "
445                    "Hit    Time            Avg             s^2\n"
446                       "  --------                               "
447                    "---    ----            ---             ---\n");
448 #else
449         seq_printf(m, "  Function                               Hit\n"
450                       "  --------                               ---\n");
451 #endif
452         return 0;
453 }
454
455 static int function_stat_show(struct seq_file *m, void *v)
456 {
457         struct ftrace_profile *rec = v;
458         char str[KSYM_SYMBOL_LEN];
459         int ret = 0;
460 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
461         static struct trace_seq s;
462         unsigned long long avg;
463         unsigned long long stddev;
464 #endif
465         mutex_lock(&ftrace_profile_lock);
466
467         /* we raced with function_profile_reset() */
468         if (unlikely(rec->counter == 0)) {
469                 ret = -EBUSY;
470                 goto out;
471         }
472
473         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
474         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
475
476 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
477         seq_printf(m, "    ");
478         avg = rec->time;
479         do_div(avg, rec->counter);
480
481         /* Sample variance (s^2) */
482         if (rec->counter <= 1)
483                 stddev = 0;
484         else {
485                 stddev = rec->time_squared - rec->counter * avg * avg;
486                 /*
487                  * Divide by only 1000 for the ns^2 -> us^2 conversion.
488                  * trace_print_graph_duration will divide by 1000 again.
489                  */
490                 do_div(stddev, (rec->counter - 1) * 1000);
491         }
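        /*
         * Added note (not in the original file): with n = rec->counter,
         * sum = rec->time and sumsq = rec->time_squared, the block above
         * computes the sample variance s^2 = (sumsq - n * avg^2) / (n - 1),
         * already scaled down by the extra factor of 1000 described above.
         */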
492
493         trace_seq_init(&s);
494         trace_print_graph_duration(rec->time, &s);
495         trace_seq_puts(&s, "    ");
496         trace_print_graph_duration(avg, &s);
497         trace_seq_puts(&s, "    ");
498         trace_print_graph_duration(stddev, &s);
499         trace_print_seq(m, &s);
500 #endif
501         seq_putc(m, '\n');
502 out:
503         mutex_unlock(&ftrace_profile_lock);
504
505         return ret;
506 }
507
508 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
509 {
510         struct ftrace_profile_page *pg;
511
512         pg = stat->pages = stat->start;
513
514         while (pg) {
515                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
516                 pg->index = 0;
517                 pg = pg->next;
518         }
519
520         memset(stat->hash, 0,
521                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
522 }
523
524 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
525 {
526         struct ftrace_profile_page *pg;
527         int functions;
528         int pages;
529         int i;
530
531         /* If we already allocated, do nothing */
532         if (stat->pages)
533                 return 0;
534
535         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
536         if (!stat->pages)
537                 return -ENOMEM;
538
539 #ifdef CONFIG_DYNAMIC_FTRACE
540         functions = ftrace_update_tot_cnt;
541 #else
542         /*
543          * We do not know the number of functions that exist because
544  * dynamic tracing is what counts them. From past experience
545          * we have around 20K functions. That should be more than enough.
546          * It is highly unlikely we will execute every function in
547          * the kernel.
548          */
549         functions = 20000;
550 #endif
551
552         pg = stat->start = stat->pages;
553
554         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
555
556         for (i = 0; i < pages; i++) {
557                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
558                 if (!pg->next)
559                         goto out_free;
560                 pg = pg->next;
561         }
562
563         return 0;
564
565  out_free:
566         pg = stat->start;
567         while (pg) {
568                 unsigned long tmp = (unsigned long)pg;
569
570                 pg = pg->next;
571                 free_page(tmp);
572         }
573
574         free_page((unsigned long)stat->pages);
575         stat->pages = NULL;
576         stat->start = NULL;
577
578         return -ENOMEM;
579 }
580
581 static int ftrace_profile_init_cpu(int cpu)
582 {
583         struct ftrace_profile_stat *stat;
584         int size;
585
586         stat = &per_cpu(ftrace_profile_stats, cpu);
587
588         if (stat->hash) {
589                 /* If the profile is already created, simply reset it */
590                 ftrace_profile_reset(stat);
591                 return 0;
592         }
593
594         /*
595          * We are profiling all functions, but usually only a few thousand
596          * functions are hit. We'll make a hash of 1024 items.
597          */
598         size = FTRACE_PROFILE_HASH_SIZE;
599
600         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
601
602         if (!stat->hash)
603                 return -ENOMEM;
604
605         if (!ftrace_profile_bits) {
606                 size--;
607
608                 for (; size; size >>= 1)
609                         ftrace_profile_bits++;
610         }
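        /*
         * Added note (not in the original file): for the 1024-entry hash the
         * loop above starts from size = 1023 and shifts right until zero,
         * leaving ftrace_profile_bits = 10, i.e. log2 of the hash size.
         */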
611
612         /* Preallocate the function profiling pages */
613         if (ftrace_profile_pages_init(stat) < 0) {
614                 kfree(stat->hash);
615                 stat->hash = NULL;
616                 return -ENOMEM;
617         }
618
619         return 0;
620 }
621
622 static int ftrace_profile_init(void)
623 {
624         int cpu;
625         int ret = 0;
626
627         for_each_online_cpu(cpu) {
628                 ret = ftrace_profile_init_cpu(cpu);
629                 if (ret)
630                         break;
631         }
632
633         return ret;
634 }
635
636 /* interrupts must be disabled */
637 static struct ftrace_profile *
638 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
639 {
640         struct ftrace_profile *rec;
641         struct hlist_head *hhd;
642         struct hlist_node *n;
643         unsigned long key;
644
645         key = hash_long(ip, ftrace_profile_bits);
646         hhd = &stat->hash[key];
647
648         if (hlist_empty(hhd))
649                 return NULL;
650
651         hlist_for_each_entry_rcu(rec, n, hhd, node) {
652                 if (rec->ip == ip)
653                         return rec;
654         }
655
656         return NULL;
657 }
658
659 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
660                                struct ftrace_profile *rec)
661 {
662         unsigned long key;
663
664         key = hash_long(rec->ip, ftrace_profile_bits);
665         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
666 }
667
668 /*
669  * The memory is already allocated; this simply finds a new record to use.
670  */
671 static struct ftrace_profile *
672 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
673 {
674         struct ftrace_profile *rec = NULL;
675
676         /* prevent recursion (from NMIs) */
677         if (atomic_inc_return(&stat->disabled) != 1)
678                 goto out;
679
680         /*
681          * Try to find the function again since an NMI
682          * could have added it
683          */
684         rec = ftrace_find_profiled_func(stat, ip);
685         if (rec)
686                 goto out;
687
688         if (stat->pages->index == PROFILES_PER_PAGE) {
689                 if (!stat->pages->next)
690                         goto out;
691                 stat->pages = stat->pages->next;
692         }
693
694         rec = &stat->pages->records[stat->pages->index++];
695         rec->ip = ip;
696         ftrace_add_profile(stat, rec);
697
698  out:
699         atomic_dec(&stat->disabled);
700
701         return rec;
702 }
703
704 static void
705 function_profile_call(unsigned long ip, unsigned long parent_ip)
706 {
707         struct ftrace_profile_stat *stat;
708         struct ftrace_profile *rec;
709         unsigned long flags;
710
711         if (!ftrace_profile_enabled)
712                 return;
713
714         local_irq_save(flags);
715
716         stat = &__get_cpu_var(ftrace_profile_stats);
717         if (!stat->hash || !ftrace_profile_enabled)
718                 goto out;
719
720         rec = ftrace_find_profiled_func(stat, ip);
721         if (!rec) {
722                 rec = ftrace_profile_alloc(stat, ip);
723                 if (!rec)
724                         goto out;
725         }
726
727         rec->counter++;
728  out:
729         local_irq_restore(flags);
730 }
731
732 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
733 static int profile_graph_entry(struct ftrace_graph_ent *trace)
734 {
735         function_profile_call(trace->func, 0);
736         return 1;
737 }
738
739 static void profile_graph_return(struct ftrace_graph_ret *trace)
740 {
741         struct ftrace_profile_stat *stat;
742         unsigned long long calltime;
743         struct ftrace_profile *rec;
744         unsigned long flags;
745
746         local_irq_save(flags);
747         stat = &__get_cpu_var(ftrace_profile_stats);
748         if (!stat->hash || !ftrace_profile_enabled)
749                 goto out;
750
751         /* If the calltime was zero'd ignore it */
752         if (!trace->calltime)
753                 goto out;
754
755         calltime = trace->rettime - trace->calltime;
756
757         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
758                 int index;
759
760                 index = trace->depth;
761
762                 /* Append this call time to the parent time to subtract */
763                 if (index)
764                         current->ret_stack[index - 1].subtime += calltime;
765
766                 if (current->ret_stack[index].subtime < calltime)
767                         calltime -= current->ret_stack[index].subtime;
768                 else
769                         calltime = 0;
770         }
771
772         rec = ftrace_find_profiled_func(stat, trace->func);
773         if (rec) {
774                 rec->time += calltime;
775                 rec->time_squared += calltime * calltime;
776         }
777
778  out:
779         local_irq_restore(flags);
780 }
781
782 static int register_ftrace_profiler(void)
783 {
784         return register_ftrace_graph(&profile_graph_return,
785                                      &profile_graph_entry);
786 }
787
788 static void unregister_ftrace_profiler(void)
789 {
790         unregister_ftrace_graph();
791 }
792 #else
793 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
794         .func           = function_profile_call,
795 };
796
797 static int register_ftrace_profiler(void)
798 {
799         return register_ftrace_function(&ftrace_profile_ops);
800 }
801
802 static void unregister_ftrace_profiler(void)
803 {
804         unregister_ftrace_function(&ftrace_profile_ops);
805 }
806 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
807
808 static ssize_t
809 ftrace_profile_write(struct file *filp, const char __user *ubuf,
810                      size_t cnt, loff_t *ppos)
811 {
812         unsigned long val;
813         int ret;
814
815         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
816         if (ret)
817                 return ret;
818
819         val = !!val;
820
821         mutex_lock(&ftrace_profile_lock);
822         if (ftrace_profile_enabled ^ val) {
823                 if (val) {
824                         ret = ftrace_profile_init();
825                         if (ret < 0) {
826                                 cnt = ret;
827                                 goto out;
828                         }
829
830                         ret = register_ftrace_profiler();
831                         if (ret < 0) {
832                                 cnt = ret;
833                                 goto out;
834                         }
835                         ftrace_profile_enabled = 1;
836                 } else {
837                         ftrace_profile_enabled = 0;
838                         /*
839                          * unregister_ftrace_profiler calls stop_machine
840                          * so this acts like a synchronize_sched.
841                          */
842                         unregister_ftrace_profiler();
843                 }
844         }
845  out:
846         mutex_unlock(&ftrace_profile_lock);
847
848         *ppos += cnt;
849
850         return cnt;
851 }
852
853 static ssize_t
854 ftrace_profile_read(struct file *filp, char __user *ubuf,
855                      size_t cnt, loff_t *ppos)
856 {
857         char buf[64];           /* big enough to hold a number */
858         int r;
859
860         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
861         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
862 }
863
864 static const struct file_operations ftrace_profile_fops = {
865         .open           = tracing_open_generic,
866         .read           = ftrace_profile_read,
867         .write          = ftrace_profile_write,
868         .llseek         = default_llseek,
869 };
870
871 /* used to initialize the real stat files */
872 static struct tracer_stat function_stats __initdata = {
873         .name           = "functions",
874         .stat_start     = function_stat_start,
875         .stat_next      = function_stat_next,
876         .stat_cmp       = function_stat_cmp,
877         .stat_headers   = function_stat_headers,
878         .stat_show      = function_stat_show
879 };
880
881 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
882 {
883         struct ftrace_profile_stat *stat;
884         struct dentry *entry;
885         char *name;
886         int ret;
887         int cpu;
888
889         for_each_possible_cpu(cpu) {
890                 stat = &per_cpu(ftrace_profile_stats, cpu);
891
892                 /* allocate enough for function name + cpu number */
893                 name = kmalloc(32, GFP_KERNEL);
894                 if (!name) {
895                         /*
896                          * The files created are permanent; even if something
897                          * goes wrong here we still do not free this memory.
898                          */
899                         WARN(1,
900                              "Could not allocate stat file for cpu %d\n",
901                              cpu);
902                         return;
903                 }
904                 stat->stat = function_stats;
905                 snprintf(name, 32, "function%d", cpu);
906                 stat->stat.name = name;
907                 ret = register_stat_tracer(&stat->stat);
908                 if (ret) {
909                         WARN(1,
910                              "Could not register function stat for cpu %d\n",
911                              cpu);
912                         kfree(name);
913                         return;
914                 }
915         }
916
917         entry = debugfs_create_file("function_profile_enabled", 0644,
918                                     d_tracer, NULL, &ftrace_profile_fops);
919         if (!entry)
920                 pr_warning("Could not create debugfs "
921                            "'function_profile_enabled' entry\n");
922 }
923
924 #else /* CONFIG_FUNCTION_PROFILER */
925 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
926 {
927 }
928 #endif /* CONFIG_FUNCTION_PROFILER */
929
930 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
931
932 #ifdef CONFIG_DYNAMIC_FTRACE
933
934 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
935 # error Dynamic ftrace depends on MCOUNT_RECORD
936 #endif
937
938 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
939
940 struct ftrace_func_probe {
941         struct hlist_node       node;
942         struct ftrace_probe_ops *ops;
943         unsigned long           flags;
944         unsigned long           ip;
945         void                    *data;
946         struct rcu_head         rcu;
947 };
948
949 enum {
950         FTRACE_UPDATE_CALLS             = (1 << 0),
951         FTRACE_DISABLE_CALLS            = (1 << 1),
952         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
953         FTRACE_START_FUNC_RET           = (1 << 3),
954         FTRACE_STOP_FUNC_RET            = (1 << 4),
955 };
956 struct ftrace_func_entry {
957         struct hlist_node hlist;
958         unsigned long ip;
959 };
960
961 struct ftrace_hash {
962         unsigned long           size_bits;
963         struct hlist_head       *buckets;
964         unsigned long           count;
965         struct rcu_head         rcu;
966 };
967
968 /*
969  * We make these constant because no one should touch them,
970  * but they are used as the default "empty hash", to avoid allocating
971  * it all the time. These are in a read only section such that if
972  * anyone does try to modify it, it will cause an exception.
973  */
974 static const struct hlist_head empty_buckets[1];
975 static const struct ftrace_hash empty_hash = {
976         .buckets = (struct hlist_head *)empty_buckets,
977 };
978 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
979
980 static struct ftrace_ops global_ops = {
981         .func                   = ftrace_stub,
982         .notrace_hash           = EMPTY_HASH,
983         .filter_hash            = EMPTY_HASH,
984 };
985
986 static struct dyn_ftrace *ftrace_new_addrs;
987
988 static DEFINE_MUTEX(ftrace_regex_lock);
989
990 struct ftrace_page {
991         struct ftrace_page      *next;
992         int                     index;
993         struct dyn_ftrace       records[];
994 };
995
996 #define ENTRIES_PER_PAGE \
997   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
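
/*
 * Worked example (added comment, not in the original file; assumes 4K pages,
 * a 64-bit build and an empty struct dyn_arch_ftrace as on x86): both
 * struct ftrace_page and struct dyn_ftrace come out at 16 bytes, so
 * ENTRIES_PER_PAGE works out to roughly (4096 - 16) / 16 = 255 records.
 */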
998
999 /* estimate from running different kernels */
1000 #define NR_TO_INIT              10000
1001
1002 static struct ftrace_page       *ftrace_pages_start;
1003 static struct ftrace_page       *ftrace_pages;
1004
1005 static struct dyn_ftrace *ftrace_free_records;
1006
1007 static struct ftrace_func_entry *
1008 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1009 {
1010         unsigned long key;
1011         struct ftrace_func_entry *entry;
1012         struct hlist_head *hhd;
1013         struct hlist_node *n;
1014
1015         if (!hash->count)
1016                 return NULL;
1017
1018         if (hash->size_bits > 0)
1019                 key = hash_long(ip, hash->size_bits);
1020         else
1021                 key = 0;
1022
1023         hhd = &hash->buckets[key];
1024
1025         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1026                 if (entry->ip == ip)
1027                         return entry;
1028         }
1029         return NULL;
1030 }
1031
1032 static void __add_hash_entry(struct ftrace_hash *hash,
1033                              struct ftrace_func_entry *entry)
1034 {
1035         struct hlist_head *hhd;
1036         unsigned long key;
1037
1038         if (hash->size_bits)
1039                 key = hash_long(entry->ip, hash->size_bits);
1040         else
1041                 key = 0;
1042
1043         hhd = &hash->buckets[key];
1044         hlist_add_head(&entry->hlist, hhd);
1045         hash->count++;
1046 }
1047
1048 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1049 {
1050         struct ftrace_func_entry *entry;
1051
1052         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1053         if (!entry)
1054                 return -ENOMEM;
1055
1056         entry->ip = ip;
1057         __add_hash_entry(hash, entry);
1058
1059         return 0;
1060 }
1061
1062 static void
1063 free_hash_entry(struct ftrace_hash *hash,
1064                   struct ftrace_func_entry *entry)
1065 {
1066         hlist_del(&entry->hlist);
1067         kfree(entry);
1068         hash->count--;
1069 }
1070
1071 static void
1072 remove_hash_entry(struct ftrace_hash *hash,
1073                   struct ftrace_func_entry *entry)
1074 {
1075         hlist_del(&entry->hlist);
1076         hash->count--;
1077 }
1078
1079 static void ftrace_hash_clear(struct ftrace_hash *hash)
1080 {
1081         struct hlist_head *hhd;
1082         struct hlist_node *tp, *tn;
1083         struct ftrace_func_entry *entry;
1084         int size = 1 << hash->size_bits;
1085         int i;
1086
1087         if (!hash->count)
1088                 return;
1089
1090         for (i = 0; i < size; i++) {
1091                 hhd = &hash->buckets[i];
1092                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1093                         free_hash_entry(hash, entry);
1094         }
1095         FTRACE_WARN_ON(hash->count);
1096 }
1097
1098 static void free_ftrace_hash(struct ftrace_hash *hash)
1099 {
1100         if (!hash || hash == EMPTY_HASH)
1101                 return;
1102         ftrace_hash_clear(hash);
1103         kfree(hash->buckets);
1104         kfree(hash);
1105 }
1106
1107 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1108 {
1109         struct ftrace_hash *hash;
1110
1111         hash = container_of(rcu, struct ftrace_hash, rcu);
1112         free_ftrace_hash(hash);
1113 }
1114
1115 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1116 {
1117         if (!hash || hash == EMPTY_HASH)
1118                 return;
1119         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1120 }
1121
1122 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1123 {
1124         struct ftrace_hash *hash;
1125         int size;
1126
1127         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1128         if (!hash)
1129                 return NULL;
1130
1131         size = 1 << size_bits;
1132         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1133
1134         if (!hash->buckets) {
1135                 kfree(hash);
1136                 return NULL;
1137         }
1138
1139         hash->size_bits = size_bits;
1140
1141         return hash;
1142 }
1143
1144 static struct ftrace_hash *
1145 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1146 {
1147         struct ftrace_func_entry *entry;
1148         struct ftrace_hash *new_hash;
1149         struct hlist_node *tp;
1150         int size;
1151         int ret;
1152         int i;
1153
1154         new_hash = alloc_ftrace_hash(size_bits);
1155         if (!new_hash)
1156                 return NULL;
1157
1158         /* Empty hash? */
1159         if (!hash || !hash->count)
1160                 return new_hash;
1161
1162         size = 1 << hash->size_bits;
1163         for (i = 0; i < size; i++) {
1164                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1165                         ret = add_hash_entry(new_hash, entry->ip);
1166                         if (ret < 0)
1167                                 goto free_hash;
1168                 }
1169         }
1170
1171         FTRACE_WARN_ON(new_hash->count != hash->count);
1172
1173         return new_hash;
1174
1175  free_hash:
1176         free_ftrace_hash(new_hash);
1177         return NULL;
1178 }
1179
1180 static void
1181 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1182 static void
1183 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1184
1185 static int
1186 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1187                  struct ftrace_hash **dst, struct ftrace_hash *src)
1188 {
1189         struct ftrace_func_entry *entry;
1190         struct hlist_node *tp, *tn;
1191         struct hlist_head *hhd;
1192         struct ftrace_hash *old_hash;
1193         struct ftrace_hash *new_hash;
1194         unsigned long key;
1195         int size = src->count;
1196         int bits = 0;
1197         int ret;
1198         int i;
1199
1200         /*
1201          * Remove the current set, update the hash and add
1202          * them back.
1203          */
1204         ftrace_hash_rec_disable(ops, enable);
1205
1206         /*
1207          * If the new source is empty, just free dst and assign it
1208          * the empty_hash.
1209          */
1210         if (!src->count) {
1211                 free_ftrace_hash_rcu(*dst);
1212                 rcu_assign_pointer(*dst, EMPTY_HASH);
1213                 return 0;
1214         }
1215
1216         /*
1217          * Make the hash size about 1/2 the # found
1218          */
1219         for (size /= 2; size; size >>= 1)
1220                 bits++;
1221
1222         /* Don't allocate too much */
1223         if (bits > FTRACE_HASH_MAX_BITS)
1224                 bits = FTRACE_HASH_MAX_BITS;
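
        /*
         * Added note (not in the original file): e.g. for src->count == 100
         * the loop above starts at size = 50 and gives bits = 6, i.e. a
         * 64-bucket hash, roughly half the number of entries.
         */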
1225
1226         ret = -ENOMEM;
1227         new_hash = alloc_ftrace_hash(bits);
1228         if (!new_hash)
1229                 goto out;
1230
1231         size = 1 << src->size_bits;
1232         for (i = 0; i < size; i++) {
1233                 hhd = &src->buckets[i];
1234                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1235                         if (bits > 0)
1236                                 key = hash_long(entry->ip, bits);
1237                         else
1238                                 key = 0;
1239                         remove_hash_entry(src, entry);
1240                         __add_hash_entry(new_hash, entry);
1241                 }
1242         }
1243
1244         old_hash = *dst;
1245         rcu_assign_pointer(*dst, new_hash);
1246         free_ftrace_hash_rcu(old_hash);
1247
1248         ret = 0;
1249  out:
1250         /*
1251          * Enable regardless of ret:
1252          *  On success, we enable the new hash.
1253          *  On failure, we re-enable the original hash.
1254          */
1255         ftrace_hash_rec_enable(ops, enable);
1256
1257         return ret;
1258 }
1259
1260 /*
1261  * Test the hashes for this ops to see if we want to call
1262  * the ops->func or not.
1263  *
1264  * It's a match if the ip is in the ops->filter_hash or
1265  * the filter_hash does not exist or is empty,
1266  *  AND
1267  * the ip is not in the ops->notrace_hash.
1268  *
1269  * This needs to be called with preemption disabled as
1270  * the hashes are freed with call_rcu_sched().
1271  */
1272 static int
1273 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1274 {
1275         struct ftrace_hash *filter_hash;
1276         struct ftrace_hash *notrace_hash;
1277         int ret;
1278
1279         filter_hash = rcu_dereference_raw(ops->filter_hash);
1280         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1281
1282         if ((!filter_hash || !filter_hash->count ||
1283              ftrace_lookup_ip(filter_hash, ip)) &&
1284             (!notrace_hash || !notrace_hash->count ||
1285              !ftrace_lookup_ip(notrace_hash, ip)))
1286                 ret = 1;
1287         else
1288                 ret = 0;
1289
1290         return ret;
1291 }
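
/*
 * Added quick reference (not in the original file) for the test above:
 *
 *	filter_hash		notrace_hash		ip is
 *	empty			empty			traced
 *	contains ip		empty			traced
 *	non-empty, no ip	(any)			not traced
 *	contains ip		contains ip		not traced
 *	empty			contains ip		not traced
 */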
1292
1293 /*
1294  * This is a nested (double) for loop.  Do not use 'break' to break out
1295  * of the loop; you must use a goto.
1296  */
1297 #define do_for_each_ftrace_rec(pg, rec)                                 \
1298         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1299                 int _____i;                                             \
1300                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1301                         rec = &pg->records[_____i];
1302
1303 #define while_for_each_ftrace_rec()             \
1304                 }                               \
1305         }
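
/*
 * Illustrative usage (added comment, not in the original file; see
 * ftrace_replace_code() below for a real caller):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 */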
1306
1307 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1308                                      int filter_hash,
1309                                      bool inc)
1310 {
1311         struct ftrace_hash *hash;
1312         struct ftrace_hash *other_hash;
1313         struct ftrace_page *pg;
1314         struct dyn_ftrace *rec;
1315         int count = 0;
1316         int all = 0;
1317
1318         /* Only update if the ops has been registered */
1319         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1320                 return;
1321
1322         /*
1323          * In the filter_hash case:
1324          *   If the count is zero, we update all records.
1325          *   Otherwise we just update the items in the hash.
1326          *
1327          * In the notrace_hash case:
1328          *   We enable the update in the hash.
1329          *   As disabling notrace means enabling tracing, and
1330          *   enabling notrace means disabling it, the inc variable
1331          *   is inverted.
1332          */
1333         if (filter_hash) {
1334                 hash = ops->filter_hash;
1335                 other_hash = ops->notrace_hash;
1336                 if (!hash || !hash->count)
1337                         all = 1;
1338         } else {
1339                 inc = !inc;
1340                 hash = ops->notrace_hash;
1341                 other_hash = ops->filter_hash;
1342                 /*
1343                  * If the notrace hash has no items,
1344                  * then there's nothing to do.
1345                  */
1346                 if (hash && !hash->count)
1347                         return;
1348         }
1349
1350         do_for_each_ftrace_rec(pg, rec) {
1351                 int in_other_hash = 0;
1352                 int in_hash = 0;
1353                 int match = 0;
1354
1355                 if (all) {
1356                         /*
1357                          * Only the filter_hash affects all records.
1358                          * Update if the record is not in the notrace hash.
1359                          */
1360                         if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1361                                 match = 1;
1362                 } else {
1363                         in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
1364                         in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
1365
1366                         /* filter case: in this hash but not the other;
1367                          * notrace case: in this hash and also in the other
1368                          * hash (or the other hash is empty). */
1369                         if (filter_hash && in_hash && !in_other_hash)
1370                                 match = 1;
1371                         else if (!filter_hash && in_hash &&
1372                                  (in_other_hash || !other_hash->count))
1373                                 match = 1;
1374                 }
1375                 if (!match)
1376                         continue;
1377
1378                 if (inc) {
1379                         rec->flags++;
1380                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1381                                 return;
1382                 } else {
1383                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1384                                 return;
1385                         rec->flags--;
1386                 }
1387                 count++;
1388                 /* Shortcut, if we handled all records, we are done. */
1389                 if (!all && count == hash->count)
1390                         return;
1391         } while_for_each_ftrace_rec();
1392 }
1393
1394 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1395                                     int filter_hash)
1396 {
1397         __ftrace_hash_rec_update(ops, filter_hash, 0);
1398 }
1399
1400 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1401                                    int filter_hash)
1402 {
1403         __ftrace_hash_rec_update(ops, filter_hash, 1);
1404 }
1405
1406 static void ftrace_free_rec(struct dyn_ftrace *rec)
1407 {
1408         rec->freelist = ftrace_free_records;
1409         ftrace_free_records = rec;
1410         rec->flags |= FTRACE_FL_FREE;
1411 }
1412
1413 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1414 {
1415         struct dyn_ftrace *rec;
1416
1417         /* First check for freed records */
1418         if (ftrace_free_records) {
1419                 rec = ftrace_free_records;
1420
1421                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1422                         FTRACE_WARN_ON_ONCE(1);
1423                         ftrace_free_records = NULL;
1424                         return NULL;
1425                 }
1426
1427                 ftrace_free_records = rec->freelist;
1428                 memset(rec, 0, sizeof(*rec));
1429                 return rec;
1430         }
1431
1432         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1433                 if (!ftrace_pages->next) {
1434                         /* allocate another page */
1435                         ftrace_pages->next =
1436                                 (void *)get_zeroed_page(GFP_KERNEL);
1437                         if (!ftrace_pages->next)
1438                                 return NULL;
1439                 }
1440                 ftrace_pages = ftrace_pages->next;
1441         }
1442
1443         return &ftrace_pages->records[ftrace_pages->index++];
1444 }
1445
1446 static struct dyn_ftrace *
1447 ftrace_record_ip(unsigned long ip)
1448 {
1449         struct dyn_ftrace *rec;
1450
1451         if (ftrace_disabled)
1452                 return NULL;
1453
1454         rec = ftrace_alloc_dyn_node(ip);
1455         if (!rec)
1456                 return NULL;
1457
1458         rec->ip = ip;
1459         rec->newlist = ftrace_new_addrs;
1460         ftrace_new_addrs = rec;
1461
1462         return rec;
1463 }
1464
1465 static void print_ip_ins(const char *fmt, unsigned char *p)
1466 {
1467         int i;
1468
1469         printk(KERN_CONT "%s", fmt);
1470
1471         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1472                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1473 }
1474
1475 static void ftrace_bug(int failed, unsigned long ip)
1476 {
1477         switch (failed) {
1478         case -EFAULT:
1479                 FTRACE_WARN_ON_ONCE(1);
1480                 pr_info("ftrace faulted on modifying ");
1481                 print_ip_sym(ip);
1482                 break;
1483         case -EINVAL:
1484                 FTRACE_WARN_ON_ONCE(1);
1485                 pr_info("ftrace failed to modify ");
1486                 print_ip_sym(ip);
1487                 print_ip_ins(" actual: ", (unsigned char *)ip);
1488                 printk(KERN_CONT "\n");
1489                 break;
1490         case -EPERM:
1491                 FTRACE_WARN_ON_ONCE(1);
1492                 pr_info("ftrace faulted on writing ");
1493                 print_ip_sym(ip);
1494                 break;
1495         default:
1496                 FTRACE_WARN_ON_ONCE(1);
1497                 pr_info("ftrace faulted on unknown error ");
1498                 print_ip_sym(ip);
1499         }
1500 }
1501
1502
1503 /* Return 1 if the address range is reserved for ftrace */
1504 int ftrace_text_reserved(void *start, void *end)
1505 {
1506         struct dyn_ftrace *rec;
1507         struct ftrace_page *pg;
1508
1509         do_for_each_ftrace_rec(pg, rec) {
1510                 if (rec->ip <= (unsigned long)end &&
1511                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1512                         return 1;
1513         } while_for_each_ftrace_rec();
1514         return 0;
1515 }
1516
1517
1518 static int
1519 __ftrace_replace_code(struct dyn_ftrace *rec, int update)
1520 {
1521         unsigned long ftrace_addr;
1522         unsigned long flag = 0UL;
1523
1524         ftrace_addr = (unsigned long)FTRACE_ADDR;
1525
1526         /*
1527          * If we are updating calls:
1528          *
1529          *   If the record has a ref count, then we need to enable it
1530          *   because someone is using it.
1531          *
1532          *   Otherwise we make sure its disabled.
1533          *
1534          * If we are disabling calls, then disable all records that
1535          * are enabled.
1536          */
1537         if (update && (rec->flags & ~FTRACE_FL_MASK))
1538                 flag = FTRACE_FL_ENABLED;
1539
1540         /* If the state of this record hasn't changed, then do nothing */
1541         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1542                 return 0;
1543
1544         if (flag) {
1545                 rec->flags |= FTRACE_FL_ENABLED;
1546                 return ftrace_make_call(rec, ftrace_addr);
1547         }
1548
1549         rec->flags &= ~FTRACE_FL_ENABLED;
1550         return ftrace_make_nop(NULL, rec, ftrace_addr);
1551 }
1552
1553 static void ftrace_replace_code(int update)
1554 {
1555         struct dyn_ftrace *rec;
1556         struct ftrace_page *pg;
1557         int failed;
1558
1559         if (unlikely(ftrace_disabled))
1560                 return;
1561
1562         do_for_each_ftrace_rec(pg, rec) {
1563                 /* Skip over free records */
1564                 if (rec->flags & FTRACE_FL_FREE)
1565                         continue;
1566
1567                 failed = __ftrace_replace_code(rec, update);
1568                 if (failed) {
1569                         ftrace_bug(failed, rec->ip);
1570                         /* Stop processing */
1571                         return;
1572                 }
1573         } while_for_each_ftrace_rec();
1574 }
1575
1576 static int
1577 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1578 {
1579         unsigned long ip;
1580         int ret;
1581
1582         ip = rec->ip;
1583
1584         if (unlikely(ftrace_disabled))
1585                 return 0;
1586
1587         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1588         if (ret) {
1589                 ftrace_bug(ret, ip);
1590                 return 0;
1591         }
1592         return 1;
1593 }
1594
1595 /*
1596  * archs can override this function if they must do something
1597  * before the code modification is performed.
1598  */
1599 int __weak ftrace_arch_code_modify_prepare(void)
1600 {
1601         return 0;
1602 }
1603
1604 /*
1605  * archs can override this function if they must do something
1606  * after the modifying code is performed.
1607  * after the code modification is performed.
1608 int __weak ftrace_arch_code_modify_post_process(void)
1609 {
1610         return 0;
1611 }
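
/*
 * Added note (not in the original file): architectures override these weak
 * hooks when patching needs setup or teardown; x86, for example, uses them
 * to make kernel and module text writable before the update and read-only
 * again afterwards.
 */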
1612
1613 static int __ftrace_modify_code(void *data)
1614 {
1615         int *command = data;
1616
1617         /*
1618          * Do not call the function tracer while we update the code.
1619          * We are inside stop_machine(); there is no need to worry about races.
1620          */
1621         function_trace_stop++;
1622
1623         if (*command & FTRACE_UPDATE_CALLS)
1624                 ftrace_replace_code(1);
1625         else if (*command & FTRACE_DISABLE_CALLS)
1626                 ftrace_replace_code(0);
1627
1628         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1629                 ftrace_update_ftrace_func(ftrace_trace_function);
1630
1631         if (*command & FTRACE_START_FUNC_RET)
1632                 ftrace_enable_ftrace_graph_caller();
1633         else if (*command & FTRACE_STOP_FUNC_RET)
1634                 ftrace_disable_ftrace_graph_caller();
1635
1636 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
1637         /*
1638          * For archs that call ftrace_test_stop_func(), we must
1639          * wait till after we update all the function callers
1640          * before we update the callback. This keeps different
1641          * ops that record different functions from corrupting
1642          * each other.
1643          */
1644         __ftrace_trace_function = __ftrace_trace_function_delay;
1645 #endif
1646         function_trace_stop--;
1647
1648         return 0;
1649 }
1650
1651 static void ftrace_run_update_code(int command)
1652 {
1653         int ret;
1654
1655         ret = ftrace_arch_code_modify_prepare();
1656         FTRACE_WARN_ON(ret);
1657         if (ret)
1658                 return;
1659
1660         stop_machine(__ftrace_modify_code, &command, NULL);
1661
1662         ret = ftrace_arch_code_modify_post_process();
1663         FTRACE_WARN_ON(ret);
1664 }
1665
1666 static ftrace_func_t saved_ftrace_func;
1667 static int ftrace_start_up;
1668 static int global_start_up;
1669
1670 static void ftrace_startup_enable(int command)
1671 {
1672         if (saved_ftrace_func != ftrace_trace_function) {
1673                 saved_ftrace_func = ftrace_trace_function;
1674                 command |= FTRACE_UPDATE_TRACE_FUNC;
1675         }
1676
1677         if (!command || !ftrace_enabled)
1678                 return;
1679
1680         ftrace_run_update_code(command);
1681 }
1682
1683 static int ftrace_startup(struct ftrace_ops *ops, int command)
1684 {
1685         bool hash_enable = true;
1686
1687         if (unlikely(ftrace_disabled))
1688                 return -ENODEV;
1689
1690         ftrace_start_up++;
1691         command |= FTRACE_UPDATE_CALLS;
1692
1693         /* ops marked global share the filter hashes */
1694         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1695                 ops = &global_ops;
1696                 /* Don't update hash if global is already set */
1697                 if (global_start_up)
1698                         hash_enable = false;
1699                 global_start_up++;
1700         }
1701
1702         ops->flags |= FTRACE_OPS_FL_ENABLED;
1703         if (hash_enable)
1704                 ftrace_hash_rec_enable(ops, 1);
1705
1706         ftrace_startup_enable(command);
1707
1708         return 0;
1709 }
1710
1711 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1712 {
1713         bool hash_disable = true;
1714
1715         if (unlikely(ftrace_disabled))
1716                 return;
1717
1718         ftrace_start_up--;
1719         /*
1720          * Just warn in case of an imbalance; there is no need to kill ftrace.
1721          * It is not critical, but the ftrace_call callers may never be nopped
1722          * again after further ftrace uses.
1723          */
1724         WARN_ON_ONCE(ftrace_start_up < 0);
1725
1726         if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
1727                 ops = &global_ops;
1728                 global_start_up--;
1729                 WARN_ON_ONCE(global_start_up < 0);
1730                 /* Don't update hash if global still has users */
1731                 if (global_start_up) {
1732                         WARN_ON_ONCE(!ftrace_start_up);
1733                         hash_disable = false;
1734                 }
1735         }
1736
1737         if (hash_disable)
1738                 ftrace_hash_rec_disable(ops, 1);
1739
1740         if (ops != &global_ops || !global_start_up)
1741                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1742
1743         command |= FTRACE_UPDATE_CALLS;
1744
1745         if (saved_ftrace_func != ftrace_trace_function) {
1746                 saved_ftrace_func = ftrace_trace_function;
1747                 command |= FTRACE_UPDATE_TRACE_FUNC;
1748         }
1749
1750         if (!command || !ftrace_enabled)
1751                 return;
1752
1753         ftrace_run_update_code(command);
1754 }
1755
1756 static void ftrace_startup_sysctl(void)
1757 {
1758         if (unlikely(ftrace_disabled))
1759                 return;
1760
1761         /* Force update next time */
1762         saved_ftrace_func = NULL;
1763         /* ftrace_start_up is true if we want ftrace running */
1764         if (ftrace_start_up)
1765                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
1766 }
1767
1768 static void ftrace_shutdown_sysctl(void)
1769 {
1770         if (unlikely(ftrace_disabled))
1771                 return;
1772
1773         /* ftrace_start_up is true if ftrace is running */
1774         if (ftrace_start_up)
1775                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1776 }
1777
1778 static cycle_t          ftrace_update_time;
1779 static unsigned long    ftrace_update_cnt;
1780 unsigned long           ftrace_update_tot_cnt;
1781
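/*
 * Returns true if the ops has an empty filter hash, i.e. it traces all
 * functions and therefore will also trace the functions of a newly
 * loaded module.
 */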
1782 static int ops_traces_mod(struct ftrace_ops *ops)
1783 {
1784         struct ftrace_hash *hash;
1785
1786         hash = ops->filter_hash;
1787         return !!(!hash || !hash->count);
1788 }
1789
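/*
 * ftrace_update_code - convert the newly recorded mcount call sites
 *
 * Walks the ftrace_new_addrs list, converts each mcount call to a NOP,
 * and, if tracers that trace all functions are already running, enables
 * the new records right away so a loaded module is traced immediately.
 */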
1790 static int ftrace_update_code(struct module *mod)
1791 {
1792         struct dyn_ftrace *p;
1793         cycle_t start, stop;
1794         unsigned long ref = 0;
1795
1796         /*
1797          * When adding a module, we need to check if tracers are
1798          * currently enabled and if they are set to trace all functions.
1799          * If they are, we need to enable the module functions as well
1800          * as update the reference counts for those function records.
1801          */
1802         if (mod) {
1803                 struct ftrace_ops *ops;
1804
1805                 for (ops = ftrace_ops_list;
1806                      ops != &ftrace_list_end; ops = ops->next) {
1807                         if (ops->flags & FTRACE_OPS_FL_ENABLED &&
1808                             ops_traces_mod(ops))
1809                                 ref++;
1810                 }
1811         }
1812
1813         start = ftrace_now(raw_smp_processor_id());
1814         ftrace_update_cnt = 0;
1815
1816         while (ftrace_new_addrs) {
1817
1818                 /* If something went wrong, bail without enabling anything */
1819                 if (unlikely(ftrace_disabled))
1820                         return -1;
1821
1822                 p = ftrace_new_addrs;
1823                 ftrace_new_addrs = p->newlist;
1824                 p->flags = ref;
1825
1826                 /*
1827                  * Do the initial record conversion from mcount jump
1828                  * to the NOP instructions.
1829                  */
1830                 if (!ftrace_code_disable(mod, p)) {
1831                         ftrace_free_rec(p);
1832                         /* Game over */
1833                         break;
1834                 }
1835
1836                 ftrace_update_cnt++;
1837
1838                 /*
1839                  * If the tracing is enabled, go ahead and enable the record.
1840                  *
1841          * The reason not to enable the record immediately is the
1842          * inherent check of ftrace_make_nop/ftrace_make_call for
1843          * correct previous instructions.  Doing the NOP conversion
1844          * first puts the module into the correct state, thus
1845          * passing the ftrace_make_call check.
1846                  */
1847                 if (ftrace_start_up && ref) {
1848                         int failed = __ftrace_replace_code(p, 1);
1849                         if (failed) {
1850                                 ftrace_bug(failed, p->ip);
1851                                 ftrace_free_rec(p);
1852                         }
1853                 }
1854         }
1855
1856         stop = ftrace_now(raw_smp_processor_id());
1857         ftrace_update_time = stop - start;
1858         ftrace_update_tot_cnt += ftrace_update_cnt;
1859
1860         return 0;
1861 }
1862
1863 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1864 {
1865         struct ftrace_page *pg;
1866         int cnt;
1867         int i;
1868
1869         /* allocate a few pages */
1870         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1871         if (!ftrace_pages_start)
1872                 return -1;
1873
1874         /*
1875          * Allocate a few more pages.
1876          *
1877          * TODO: have some parser search vmlinux before
1878          *   final linking to find all calls to ftrace.
1879          *   Then we can:
1880          *    a) know how many pages to allocate.
1881          *     and/or
1882          *    b) set up the table then.
1883          *
1884          *  The dynamic code is still necessary for
1885          *  modules.
1886          */
1887
1888         pg = ftrace_pages = ftrace_pages_start;
1889
1890         cnt = num_to_init / ENTRIES_PER_PAGE;
1891         pr_info("ftrace: allocating %ld entries in %d pages\n",
1892                 num_to_init, cnt + 1);
1893
1894         for (i = 0; i < cnt; i++) {
1895                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1896
1897                 /* If we fail, we'll try later anyway */
1898                 if (!pg->next)
1899                         break;
1900
1901                 pg = pg->next;
1902         }
1903
1904         return 0;
1905 }
1906
1907 enum {
1908         FTRACE_ITER_FILTER      = (1 << 0),
1909         FTRACE_ITER_NOTRACE     = (1 << 1),
1910         FTRACE_ITER_PRINTALL    = (1 << 2),
1911         FTRACE_ITER_HASH        = (1 << 3),
1912         FTRACE_ITER_ENABLED     = (1 << 4),
1913 };
1914
1915 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1916
1917 struct ftrace_iterator {
1918         loff_t                          pos;
1919         loff_t                          func_pos;
1920         struct ftrace_page              *pg;
1921         struct dyn_ftrace               *func;
1922         struct ftrace_func_probe        *probe;
1923         struct trace_parser             parser;
1924         struct ftrace_hash              *hash;
1925         struct ftrace_ops               *ops;
1926         int                             hidx;
1927         int                             idx;
1928         unsigned                        flags;
1929 };
1930
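/*
 * The t_hash_*() helpers iterate over the function probes hanging off
 * ftrace_func_hash (registered via register_ftrace_function_probe).
 * They are chained after the normal record iteration so that reading
 * set_ftrace_filter also shows the active probes.
 */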
1931 static void *
1932 t_hash_next(struct seq_file *m, loff_t *pos)
1933 {
1934         struct ftrace_iterator *iter = m->private;
1935         struct hlist_node *hnd = NULL;
1936         struct hlist_head *hhd;
1937
1938         (*pos)++;
1939         iter->pos = *pos;
1940
1941         if (iter->probe)
1942                 hnd = &iter->probe->node;
1943  retry:
1944         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1945                 return NULL;
1946
1947         hhd = &ftrace_func_hash[iter->hidx];
1948
1949         if (hlist_empty(hhd)) {
1950                 iter->hidx++;
1951                 hnd = NULL;
1952                 goto retry;
1953         }
1954
1955         if (!hnd)
1956                 hnd = hhd->first;
1957         else {
1958                 hnd = hnd->next;
1959                 if (!hnd) {
1960                         iter->hidx++;
1961                         goto retry;
1962                 }
1963         }
1964
1965         if (WARN_ON_ONCE(!hnd))
1966                 return NULL;
1967
1968         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1969
1970         return iter;
1971 }
1972
1973 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1974 {
1975         struct ftrace_iterator *iter = m->private;
1976         void *p = NULL;
1977         loff_t l;
1978
1979         if (iter->func_pos > *pos)
1980                 return NULL;
1981
1982         iter->hidx = 0;
1983         for (l = 0; l <= (*pos - iter->func_pos); ) {
1984                 p = t_hash_next(m, &l);
1985                 if (!p)
1986                         break;
1987         }
1988         if (!p)
1989                 return NULL;
1990
1991         /* Only set this if we have an item */
1992         iter->flags |= FTRACE_ITER_HASH;
1993
1994         return iter;
1995 }
1996
1997 static int
1998 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1999 {
2000         struct ftrace_func_probe *rec;
2001
2002         rec = iter->probe;
2003         if (WARN_ON_ONCE(!rec))
2004                 return -EIO;
2005
2006         if (rec->ops->print)
2007                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2008
2009         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2010
2011         if (rec->data)
2012                 seq_printf(m, ":%p", rec->data);
2013         seq_putc(m, '\n');
2014
2015         return 0;
2016 }
2017
2018 static void *
2019 t_next(struct seq_file *m, void *v, loff_t *pos)
2020 {
2021         struct ftrace_iterator *iter = m->private;
2022         struct ftrace_ops *ops = &global_ops;
2023         struct dyn_ftrace *rec = NULL;
2024
2025         if (unlikely(ftrace_disabled))
2026                 return NULL;
2027
2028         if (iter->flags & FTRACE_ITER_HASH)
2029                 return t_hash_next(m, pos);
2030
2031         (*pos)++;
2032         iter->pos = iter->func_pos = *pos;
2033
2034         if (iter->flags & FTRACE_ITER_PRINTALL)
2035                 return t_hash_start(m, pos);
2036
2037  retry:
2038         if (iter->idx >= iter->pg->index) {
2039                 if (iter->pg->next) {
2040                         iter->pg = iter->pg->next;
2041                         iter->idx = 0;
2042                         goto retry;
2043                 }
2044         } else {
2045                 rec = &iter->pg->records[iter->idx++];
2046                 if ((rec->flags & FTRACE_FL_FREE) ||
2047
2048                     ((iter->flags & FTRACE_ITER_FILTER) &&
2049                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2050
2051                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
2052                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2053
2054                     ((iter->flags & FTRACE_ITER_ENABLED) &&
2055                      !(rec->flags & ~FTRACE_FL_MASK))) {
2056
2057                         rec = NULL;
2058                         goto retry;
2059                 }
2060         }
2061
2062         if (!rec)
2063                 return t_hash_start(m, pos);
2064
2065         iter->func = rec;
2066
2067         return iter;
2068 }
2069
2070 static void reset_iter_read(struct ftrace_iterator *iter)
2071 {
2072         iter->pos = 0;
2073         iter->func_pos = 0;
2074         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2075 }
2076
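/*
 * t_start - seq_file start callback for the function listing files
 *
 * Takes ftrace_lock and re-walks the record pages up to *pos, since the
 * page list may have changed while the lock was not held.  For
 * set_ftrace_filter, if the filter hash is empty, sets
 * FTRACE_ITER_PRINTALL so t_show() prints a single "all functions
 * enabled" line instead of every record.
 */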
2077 static void *t_start(struct seq_file *m, loff_t *pos)
2078 {
2079         struct ftrace_iterator *iter = m->private;
2080         struct ftrace_ops *ops = &global_ops;
2081         void *p = NULL;
2082         loff_t l;
2083
2084         mutex_lock(&ftrace_lock);
2085
2086         if (unlikely(ftrace_disabled))
2087                 return NULL;
2088
2089         /*
2090          * If an lseek was done, then reset and start from beginning.
2091          */
2092         if (*pos < iter->pos)
2093                 reset_iter_read(iter);
2094
2095         /*
2096          * For set_ftrace_filter reading, if we have the filter
2097          * off, we can take a shortcut and just print out that all
2098          * functions are enabled.
2099          */
2100         if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
2101                 if (*pos > 0)
2102                         return t_hash_start(m, pos);
2103                 iter->flags |= FTRACE_ITER_PRINTALL;
2104                 /* reset in case of seek/pread */
2105                 iter->flags &= ~FTRACE_ITER_HASH;
2106                 return iter;
2107         }
2108
2109         if (iter->flags & FTRACE_ITER_HASH)
2110                 return t_hash_start(m, pos);
2111
2112         /*
2113          * Unfortunately, we need to restart at ftrace_pages_start
2114          * every time we let go of ftrace_lock. This is because
2115          * those pointers can change without the lock.
2116          */
2117         iter->pg = ftrace_pages_start;
2118         iter->idx = 0;
2119         for (l = 0; l <= *pos; ) {
2120                 p = t_next(m, p, &l);
2121                 if (!p)
2122                         break;
2123         }
2124
2125         if (!p) {
2126                 if (iter->flags & FTRACE_ITER_FILTER)
2127                         return t_hash_start(m, pos);
2128
2129                 return NULL;
2130         }
2131
2132         return iter;
2133 }
2134
2135 static void t_stop(struct seq_file *m, void *p)
2136 {
2137         mutex_unlock(&ftrace_lock);
2138 }
2139
2140 static int t_show(struct seq_file *m, void *v)
2141 {
2142         struct ftrace_iterator *iter = m->private;
2143         struct dyn_ftrace *rec;
2144
2145         if (iter->flags & FTRACE_ITER_HASH)
2146                 return t_hash_show(m, iter);
2147
2148         if (iter->flags & FTRACE_ITER_PRINTALL) {
2149                 seq_printf(m, "#### all functions enabled ####\n");
2150                 return 0;
2151         }
2152
2153         rec = iter->func;
2154
2155         if (!rec)
2156                 return 0;
2157
2158         seq_printf(m, "%ps", (void *)rec->ip);
2159         if (iter->flags & FTRACE_ITER_ENABLED)
2160                 seq_printf(m, " (%ld)",
2161                            rec->flags & ~FTRACE_FL_MASK);
2162         seq_putc(m, '\n');
2163
2164         return 0;
2165 }
2166
2167 static const struct seq_operations show_ftrace_seq_ops = {
2168         .start = t_start,
2169         .next = t_next,
2170         .stop = t_stop,
2171         .show = t_show,
2172 };
2173
2174 static int
2175 ftrace_avail_open(struct inode *inode, struct file *file)
2176 {
2177         struct ftrace_iterator *iter;
2178         int ret;
2179
2180         if (unlikely(ftrace_disabled))
2181                 return -ENODEV;
2182
2183         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2184         if (!iter)
2185                 return -ENOMEM;
2186
2187         iter->pg = ftrace_pages_start;
2188
2189         ret = seq_open(file, &show_ftrace_seq_ops);
2190         if (!ret) {
2191                 struct seq_file *m = file->private_data;
2192
2193                 m->private = iter;
2194         } else {
2195                 kfree(iter);
2196         }
2197
2198         return ret;
2199 }
2200
2201 static int
2202 ftrace_enabled_open(struct inode *inode, struct file *file)
2203 {
2204         struct ftrace_iterator *iter;
2205         int ret;
2206
2207         if (unlikely(ftrace_disabled))
2208                 return -ENODEV;
2209
2210         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2211         if (!iter)
2212                 return -ENOMEM;
2213
2214         iter->pg = ftrace_pages_start;
2215         iter->flags = FTRACE_ITER_ENABLED;
2216
2217         ret = seq_open(file, &show_ftrace_seq_ops);
2218         if (!ret) {
2219                 struct seq_file *m = file->private_data;
2220
2221                 m->private = iter;
2222         } else {
2223                 kfree(iter);
2224         }
2225
2226         return ret;
2227 }
2228
2229 static void ftrace_filter_reset(struct ftrace_hash *hash)
2230 {
2231         mutex_lock(&ftrace_lock);
2232         ftrace_hash_clear(hash);
2233         mutex_unlock(&ftrace_lock);
2234 }
2235
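/*
 * ftrace_regex_open - common open routine for set_ftrace_filter and
 * set_ftrace_notrace.  Writers get a private copy of the ops' hash to
 * work on; the copy is committed back to the ops (and the call sites
 * updated) in ftrace_regex_release().
 */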
2236 static int
2237 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2238                   struct inode *inode, struct file *file)
2239 {
2240         struct ftrace_iterator *iter;
2241         struct ftrace_hash *hash;
2242         int ret = 0;
2243
2244         if (unlikely(ftrace_disabled))
2245                 return -ENODEV;
2246
2247         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2248         if (!iter)
2249                 return -ENOMEM;
2250
2251         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2252                 kfree(iter);
2253                 return -ENOMEM;
2254         }
2255
2256         if (flag & FTRACE_ITER_NOTRACE)
2257                 hash = ops->notrace_hash;
2258         else
2259                 hash = ops->filter_hash;
2260
2261         iter->ops = ops;
2262         iter->flags = flag;
2263
2264         if (file->f_mode & FMODE_WRITE) {
2265                 mutex_lock(&ftrace_lock);
2266                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2267                 mutex_unlock(&ftrace_lock);
2268
2269                 if (!iter->hash) {
2270                         trace_parser_put(&iter->parser);
2271                         kfree(iter);
2272                         return -ENOMEM;
2273                 }
2274         }
2275
2276         mutex_lock(&ftrace_regex_lock);
2277
2278         if ((file->f_mode & FMODE_WRITE) &&
2279             (file->f_flags & O_TRUNC))
2280                 ftrace_filter_reset(iter->hash);
2281
2282         if (file->f_mode & FMODE_READ) {
2283                 iter->pg = ftrace_pages_start;
2284
2285                 ret = seq_open(file, &show_ftrace_seq_ops);
2286                 if (!ret) {
2287                         struct seq_file *m = file->private_data;
2288                         m->private = iter;
2289                 } else {
2290                         /* Failed */
2291                         free_ftrace_hash(iter->hash);
2292                         trace_parser_put(&iter->parser);
2293                         kfree(iter);
2294                 }
2295         } else
2296                 file->private_data = iter;
2297         mutex_unlock(&ftrace_regex_lock);
2298
2299         return ret;
2300 }
2301
2302 static int
2303 ftrace_filter_open(struct inode *inode, struct file *file)
2304 {
2305         return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2306                                  inode, file);
2307 }
2308
2309 static int
2310 ftrace_notrace_open(struct inode *inode, struct file *file)
2311 {
2312         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2313                                  inode, file);
2314 }
2315
2316 static loff_t
2317 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2318 {
2319         loff_t ret;
2320
2321         if (file->f_mode & FMODE_READ)
2322                 ret = seq_lseek(file, offset, origin);
2323         else
2324                 file->f_pos = ret = 1;
2325
2326         return ret;
2327 }
2328
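/*
 * ftrace_match - match a function name against a parsed glob, where
 * @type (as returned by filter_parse_regex()) says whether the match
 * must be exact, a prefix, a substring or a suffix.
 */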
2329 static int ftrace_match(char *str, char *regex, int len, int type)
2330 {
2331         int matched = 0;
2332         int slen;
2333
2334         switch (type) {
2335         case MATCH_FULL:
2336                 if (strcmp(str, regex) == 0)
2337                         matched = 1;
2338                 break;
2339         case MATCH_FRONT_ONLY:
2340                 if (strncmp(str, regex, len) == 0)
2341                         matched = 1;
2342                 break;
2343         case MATCH_MIDDLE_ONLY:
2344                 if (strstr(str, regex))
2345                         matched = 1;
2346                 break;
2347         case MATCH_END_ONLY:
2348                 slen = strlen(str);
2349                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2350                         matched = 1;
2351                 break;
2352         }
2353
2354         return matched;
2355 }
2356
2357 static int
2358 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2359 {
2360         struct ftrace_func_entry *entry;
2361         int ret = 0;
2362
2363         entry = ftrace_lookup_ip(hash, rec->ip);
2364         if (not) {
2365                 /* Do nothing if it doesn't exist */
2366                 if (!entry)
2367                         return 0;
2368
2369                 free_hash_entry(hash, entry);
2370         } else {
2371                 /* Do nothing if it exists */
2372                 if (entry)
2373                         return 0;
2374
2375                 ret = add_hash_entry(hash, rec->ip);
2376         }
2377         return ret;
2378 }
2379
2380 static int
2381 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2382                     char *regex, int len, int type)
2383 {
2384         char str[KSYM_SYMBOL_LEN];
2385         char *modname;
2386
2387         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2388
2389         if (mod) {
2390                 /* module lookup requires matching the module */
2391                 if (!modname || strcmp(modname, mod))
2392                         return 0;
2393
2394                 /* blank search means to match all funcs in the mod */
2395                 if (!len)
2396                         return 1;
2397         }
2398
2399         return ftrace_match(str, regex, len, type);
2400 }
2401
2402 static int
2403 match_records(struct ftrace_hash *hash, char *buff,
2404               int len, char *mod, int not)
2405 {
2406         unsigned search_len = 0;
2407         struct ftrace_page *pg;
2408         struct dyn_ftrace *rec;
2409         int type = MATCH_FULL;
2410         char *search = buff;
2411         int found = 0;
2412         int ret;
2413
2414         if (len) {
2415                 type = filter_parse_regex(buff, len, &search, &not);
2416                 search_len = strlen(search);
2417         }
2418
2419         mutex_lock(&ftrace_lock);
2420
2421         if (unlikely(ftrace_disabled))
2422                 goto out_unlock;
2423
2424         do_for_each_ftrace_rec(pg, rec) {
2425
2426                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2427                         ret = enter_record(hash, rec, not);
2428                         if (ret < 0) {
2429                                 found = ret;
2430                                 goto out_unlock;
2431                         }
2432                         found = 1;
2433                 }
2434         } while_for_each_ftrace_rec();
2435  out_unlock:
2436         mutex_unlock(&ftrace_lock);
2437
2438         return found;
2439 }
2440
2441 static int
2442 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2443 {
2444         return match_records(hash, buff, len, NULL, 0);
2445 }
2446
2447 static int
2448 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2449 {
2450         int not = 0;
2451
2452         /* blank or '*' mean the same */
2453         if (strcmp(buff, "*") == 0)
2454                 buff[0] = 0;
2455
2456         /* handle the case of 'don't filter this module' */
2457         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2458                 buff[0] = 0;
2459                 not = 1;
2460         }
2461
2462         return match_records(hash, buff, strlen(buff), mod, not);
2463 }
2464
2465 /*
2466  * We register the module command as a template to show others how
2467  * to register a command as well.
2468  */
2469
2470 static int
2471 ftrace_mod_callback(struct ftrace_hash *hash,
2472                     char *func, char *cmd, char *param, int enable)
2473 {
2474         char *mod;
2475         int ret = -EINVAL;
2476
2477         /*
2478          * cmd == 'mod' because we only registered this func
2479          * for the 'mod' ftrace_func_command.
2480          * But if you register one func with multiple commands,
2481          * you can tell which command was used by the cmd
2482          * parameter.
2483          */
2484
2485         /* we must have a module name */
2486         if (!param)
2487                 return ret;
2488
2489         mod = strsep(&param, ":");
2490         if (!strlen(mod))
2491                 return ret;
2492
2493         ret = ftrace_match_module_records(hash, func, mod);
2494         if (!ret)
2495                 ret = -EINVAL;
2496         if (ret < 0)
2497                 return ret;
2498
2499         return 0;
2500 }
2501
2502 static struct ftrace_func_command ftrace_mod_cmd = {
2503         .name                   = "mod",
2504         .func                   = ftrace_mod_callback,
2505 };
2506
2507 static int __init ftrace_mod_cmd_init(void)
2508 {
2509         return register_ftrace_command(&ftrace_mod_cmd);
2510 }
2511 device_initcall(ftrace_mod_cmd_init);
2512
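/*
 * The callback installed while any function probe is registered: look
 * up the traced ip in ftrace_func_hash and invoke the handler of every
 * probe attached to that address.
 */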
2513 static void
2514 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2515 {
2516         struct ftrace_func_probe *entry;
2517         struct hlist_head *hhd;
2518         struct hlist_node *n;
2519         unsigned long key;
2520
2521         key = hash_long(ip, FTRACE_HASH_BITS);
2522
2523         hhd = &ftrace_func_hash[key];
2524
2525         if (hlist_empty(hhd))
2526                 return;
2527
2528         /*
2529          * Disable preemption for these calls to prevent an RCU grace
2530          * period from completing. This syncs the hash iteration with the
2531          * freeing of items on the hash. rcu_read_lock is too dangerous here.
2532          */
2533         preempt_disable_notrace();
2534         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2535                 if (entry->ip == ip)
2536                         entry->ops->func(ip, parent_ip, &entry->data);
2537         }
2538         preempt_enable_notrace();
2539 }
2540
2541 static struct ftrace_ops trace_probe_ops __read_mostly =
2542 {
2543         .func           = function_trace_probe_call,
2544 };
2545
2546 static int ftrace_probe_registered;
2547
2548 static void __enable_ftrace_function_probe(void)
2549 {
2550         int ret;
2551         int i;
2552
2553         if (ftrace_probe_registered)
2554                 return;
2555
2556         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2557                 struct hlist_head *hhd = &ftrace_func_hash[i];
2558                 if (hhd->first)
2559                         break;
2560         }
2561         /* Nothing registered? */
2562         if (i == FTRACE_FUNC_HASHSIZE)
2563                 return;
2564
2565         ret = __register_ftrace_function(&trace_probe_ops);
2566         if (!ret)
2567                 ret = ftrace_startup(&trace_probe_ops, 0);
2568
2569         ftrace_probe_registered = 1;
2570 }
2571
2572 static void __disable_ftrace_function_probe(void)
2573 {
2574         int ret;
2575         int i;
2576
2577         if (!ftrace_probe_registered)
2578                 return;
2579
2580         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2581                 struct hlist_head *hhd = &ftrace_func_hash[i];
2582                 if (hhd->first)
2583                         return;
2584         }
2585
2586         /* no more funcs left */
2587         ret = __unregister_ftrace_function(&trace_probe_ops);
2588         if (!ret)
2589                 ftrace_shutdown(&trace_probe_ops, 0);
2590
2591         ftrace_probe_registered = 0;
2592 }
2593
2594
2595 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2596 {
2597         struct ftrace_func_probe *entry =
2598                 container_of(rhp, struct ftrace_func_probe, rcu);
2599
2600         if (entry->ops->free)
2601                 entry->ops->free(&entry->data);
2602         kfree(entry);
2603 }
2604
2605
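/*
 * register_ftrace_function_probe - attach a probe to every function
 * matching @glob.  One ftrace_func_probe entry is allocated per matching
 * record and hashed by ip.  Returns the number of functions hooked, or a
 * negative errno on failure.
 */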
2606 int
2607 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2608                               void *data)
2609 {
2610         struct ftrace_func_probe *entry;
2611         struct ftrace_page *pg;
2612         struct dyn_ftrace *rec;
2613         int type, len, not;
2614         unsigned long key;
2615         int count = 0;
2616         char *search;
2617
2618         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2619         len = strlen(search);
2620
2621         /* we do not support '!' for function probes */
2622         if (WARN_ON(not))
2623                 return -EINVAL;
2624
2625         mutex_lock(&ftrace_lock);
2626
2627         if (unlikely(ftrace_disabled))
2628                 goto out_unlock;
2629
2630         do_for_each_ftrace_rec(pg, rec) {
2631
2632                 if (!ftrace_match_record(rec, NULL, search, len, type))
2633                         continue;
2634
2635                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2636                 if (!entry) {
2637                         /* If we did not process any, then return error */
2638                         if (!count)
2639                                 count = -ENOMEM;
2640                         goto out_unlock;
2641                 }
2642
2643                 count++;
2644
2645                 entry->data = data;
2646
2647                 /*
2648                  * The caller might want to do something special
2649                  * for each function we find. We call the callback
2650                  * to give the caller an opportunity to do so.
2651                  */
2652                 if (ops->callback) {
2653                         if (ops->callback(rec->ip, &entry->data) < 0) {
2654                                 /* caller does not like this func */
2655                                 kfree(entry);
2656                                 continue;
2657                         }
2658                 }
2659
2660                 entry->ops = ops;
2661                 entry->ip = rec->ip;
2662
2663                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2664                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2665
2666         } while_for_each_ftrace_rec();
2667         __enable_ftrace_function_probe();
2668
2669  out_unlock:
2670         mutex_unlock(&ftrace_lock);
2671
2672         return count;
2673 }
2674
2675 enum {
2676         PROBE_TEST_FUNC         = 1,
2677         PROBE_TEST_DATA         = 2
2678 };
2679
2680 static void
2681 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2682                                   void *data, int flags)
2683 {
2684         struct ftrace_func_probe *entry;
2685         struct hlist_node *n, *tmp;
2686         char str[KSYM_SYMBOL_LEN];
2687         int type = MATCH_FULL;
2688         int i, len = 0;
2689         char *search;
2690
2691         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2692                 glob = NULL;
2693         else if (glob) {
2694                 int not;
2695
2696                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2697                 len = strlen(search);
2698
2699                 /* we do not support '!' for function probes */
2700                 if (WARN_ON(not))
2701                         return;
2702         }
2703
2704         mutex_lock(&ftrace_lock);
2705         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2706                 struct hlist_head *hhd = &ftrace_func_hash[i];
2707
2708                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2709
2710                         /* break up if statements for readability */
2711                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2712                                 continue;
2713
2714                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2715                                 continue;
2716
2717                         /* do this last, since it is the most expensive */
2718                         if (glob) {
2719                                 kallsyms_lookup(entry->ip, NULL, NULL,
2720                                                 NULL, str);
2721                                 if (!ftrace_match(str, glob, len, type))
2722                                         continue;
2723                         }
2724
2725                         hlist_del(&entry->node);
2726                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2727                 }
2728         }
2729         __disable_ftrace_function_probe();
2730         mutex_unlock(&ftrace_lock);
2731 }
2732
2733 void
2734 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2735                                 void *data)
2736 {
2737         __unregister_ftrace_function_probe(glob, ops, data,
2738                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2739 }
2740
2741 void
2742 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2743 {
2744         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2745 }
2746
2747 void unregister_ftrace_function_probe_all(char *glob)
2748 {
2749         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2750 }
2751
2752 static LIST_HEAD(ftrace_commands);
2753 static DEFINE_MUTEX(ftrace_cmd_mutex);
2754
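/*
 * register_ftrace_command / unregister_ftrace_command - manage the
 * "func:command[:param]" commands accepted by set_ftrace_filter and
 * set_ftrace_notrace.  Command names must be unique; registering a
 * duplicate fails with -EBUSY.
 */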
2755 int register_ftrace_command(struct ftrace_func_command *cmd)
2756 {
2757         struct ftrace_func_command *p;
2758         int ret = 0;
2759
2760         mutex_lock(&ftrace_cmd_mutex);
2761         list_for_each_entry(p, &ftrace_commands, list) {
2762                 if (strcmp(cmd->name, p->name) == 0) {
2763                         ret = -EBUSY;
2764                         goto out_unlock;
2765                 }
2766         }
2767         list_add(&cmd->list, &ftrace_commands);
2768  out_unlock:
2769         mutex_unlock(&ftrace_cmd_mutex);
2770
2771         return ret;
2772 }
2773
2774 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2775 {
2776         struct ftrace_func_command *p, *n;
2777         int ret = -ENODEV;
2778
2779         mutex_lock(&ftrace_cmd_mutex);
2780         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2781                 if (strcmp(cmd->name, p->name) == 0) {
2782                         ret = 0;
2783                         list_del_init(&p->list);
2784                         goto out_unlock;
2785                 }
2786         }
2787  out_unlock:
2788         mutex_unlock(&ftrace_cmd_mutex);
2789
2790         return ret;
2791 }
2792
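/*
 * ftrace_process_regex - handle one entry written to set_ftrace_filter
 * or set_ftrace_notrace.  A plain glob updates the hash directly; the
 * "func:command[:param]" form is dispatched to the matching entry on the
 * ftrace_commands list.
 */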
2793 static int ftrace_process_regex(struct ftrace_hash *hash,
2794                                 char *buff, int len, int enable)
2795 {
2796         char *func, *command, *next = buff;
2797         struct ftrace_func_command *p;
2798         int ret = -EINVAL;
2799
2800         func = strsep(&next, ":");
2801
2802         if (!next) {
2803                 ret = ftrace_match_records(hash, func, len);
2804                 if (!ret)
2805                         ret = -EINVAL;
2806                 if (ret < 0)
2807                         return ret;
2808                 return 0;
2809         }
2810
2811         /* command found */
2812
2813         command = strsep(&next, ":");
2814
2815         mutex_lock(&ftrace_cmd_mutex);
2816         list_for_each_entry(p, &ftrace_commands, list) {
2817                 if (strcmp(p->name, command) == 0) {
2818                         ret = p->func(hash, func, command, next, enable);
2819                         goto out_unlock;
2820                 }
2821         }
2822  out_unlock:
2823         mutex_unlock(&ftrace_cmd_mutex);
2824
2825         return ret;
2826 }
2827
2828 static ssize_t
2829 ftrace_regex_write(struct file *file, const char __user *ubuf,
2830                    size_t cnt, loff_t *ppos, int enable)
2831 {
2832         struct ftrace_iterator *iter;
2833         struct trace_parser *parser;
2834         ssize_t ret, read;
2835
2836         if (!cnt)
2837                 return 0;
2838
2839         mutex_lock(&ftrace_regex_lock);
2840
2841         ret = -ENODEV;
2842         if (unlikely(ftrace_disabled))
2843                 goto out_unlock;
2844
2845         if (file->f_mode & FMODE_READ) {
2846                 struct seq_file *m = file->private_data;
2847                 iter = m->private;
2848         } else
2849                 iter = file->private_data;
2850
2851         parser = &iter->parser;
2852         read = trace_get_user(parser, ubuf, cnt, ppos);
2853
2854         if (read >= 0 && trace_parser_loaded(parser) &&
2855             !trace_parser_cont(parser)) {
2856                 ret = ftrace_process_regex(iter->hash, parser->buffer,
2857                                            parser->idx, enable);
2858                 trace_parser_clear(parser);
2859                 if (ret)
2860                         goto out_unlock;
2861         }
2862
2863         ret = read;
2864 out_unlock:
2865         mutex_unlock(&ftrace_regex_lock);
2866
2867         return ret;
2868 }
2869
2870 static ssize_t
2871 ftrace_filter_write(struct file *file, const char __user *ubuf,
2872                     size_t cnt, loff_t *ppos)
2873 {
2874         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2875 }
2876
2877 static ssize_t
2878 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2879                      size_t cnt, loff_t *ppos)
2880 {
2881         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2882 }
2883
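/*
 * ftrace_set_regex - common helper behind ftrace_set_filter() and
 * ftrace_set_notrace().  Builds a new hash from @buf, swaps it into the
 * ops with ftrace_hash_move() and re-patches the call sites if the ops
 * is live and ftrace is enabled.
 */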
2884 static int
2885 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2886                  int reset, int enable)
2887 {
2888         struct ftrace_hash **orig_hash;
2889         struct ftrace_hash *hash;
2890         int ret;
2891
2892         /* All ops marked global use the global_ops filters */
2893         if (ops->flags & FTRACE_OPS_FL_GLOBAL)
2894                 ops = &global_ops;
2895
2896         if (unlikely(ftrace_disabled))
2897                 return -ENODEV;
2898
2899         if (enable)
2900                 orig_hash = &ops->filter_hash;
2901         else
2902                 orig_hash = &ops->notrace_hash;
2903
2904         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2905         if (!hash)
2906                 return -ENOMEM;
2907
2908         mutex_lock(&ftrace_regex_lock);
2909         if (reset)
2910                 ftrace_filter_reset(hash);
2911         if (buf)
2912                 ftrace_match_records(hash, buf, len);
2913
2914         mutex_lock(&ftrace_lock);
2915         ret = ftrace_hash_move(ops, enable, orig_hash, hash);
2916         if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
2917             && ftrace_enabled)
2918                 ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2919
2920         mutex_unlock(&ftrace_lock);
2921
2922         mutex_unlock(&ftrace_regex_lock);
2923
2924         free_ftrace_hash(hash);
2925         return ret;
2926 }
2927
2928 /**
2929  * ftrace_set_filter - set a function to filter on in ftrace
2930  * @ops - the ops to set the filter with
2931  * @buf - the string that holds the function filter text.
2932  * @len - the length of the string.
2933  * @reset - non zero to reset all filters before applying this filter.
2934  *
2935  * Filters denote which functions should be enabled when tracing is enabled.
2936  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2937  */
2938 void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
2939                        int len, int reset)
2940 {
2941         ftrace_set_regex(ops, buf, len, reset, 1);
2942 }
2943 EXPORT_SYMBOL_GPL(ftrace_set_filter);
2944
2945 /**
2946  * ftrace_set_notrace - set a function to not trace in ftrace
2947  * @ops - the ops to set the notrace filter with
2948  * @buf - the string that holds the function notrace text.
2949  * @len - the length of the string.
2950  * @reset - non zero to reset all filters before applying this filter.
2951  *
2952  * Notrace Filters denote which functions should not be enabled when tracing
2953  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2954  * for tracing.
2955  */
2956 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2957                         int len, int reset)
2958 {
2959         ftrace_set_regex(ops, buf, len, reset, 0);
2960 }
2961 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
2962 /**
2963  * ftrace_set_global_filter - set a function to filter on with global tracers
2964  *
2965  * @buf - the string that holds the function filter text.
2966  * @len - the length of the string.
2967  * @reset - non zero to reset all filters before applying this filter.
2968  *
2969  * Filters denote which functions should be enabled when tracing is enabled.
2970  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2971  */
2972 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
2973 {
2974         ftrace_set_regex(&global_ops, buf, len, reset, 1);
2975 }
2976 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
2977
2978 /**
2979  * ftrace_set_global_notrace - set a function to not trace with global tracers
2980  *
2981  * @buf - the string that holds the function notrace text.
2982  * @len - the length of the string.
2983  * @reset - non zero to reset all filters before applying this filter.
2984  *
2985  * Notrace Filters denote which functions should not be enabled when tracing
2986  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2987  * for tracing.
2988  */
2989 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
2990 {
2991         ftrace_set_regex(&global_ops, buf, len, reset, 0);
2992 }
2993 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
2994
2995 /*
2996  * command line interface to allow users to set filters on boot up.
2997  */
2998 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2999 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3000 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3001
3002 static int __init set_ftrace_notrace(char *str)
3003 {
3004         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3005         return 1;
3006 }
3007 __setup("ftrace_notrace=", set_ftrace_notrace);
3008
3009 static int __init set_ftrace_filter(char *str)
3010 {
3011         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3012         return 1;
3013 }
3014 __setup("ftrace_filter=", set_ftrace_filter);
3015
3016 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3017 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3018 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3019
3020 static int __init set_graph_function(char *str)
3021 {
3022         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3023         return 1;
3024 }
3025 __setup("ftrace_graph_filter=", set_graph_function);
3026
3027 static void __init set_ftrace_early_graph(char *buf)
3028 {
3029         int ret;
3030         char *func;
3031
3032         while (buf) {
3033                 func = strsep(&buf, ",");
3034                 /* we allow only one expression at a time */
3035                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3036                                       func);
3037                 if (ret)
3038                         printk(KERN_DEBUG "ftrace: function %s not "
3039                                           "traceable\n", func);
3040         }
3041 }
3042 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3043
3044 static void __init
3045 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3046 {
3047         char *func;
3048
3049         while (buf) {
3050                 func = strsep(&buf, ",");
3051                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3052         }
3053 }
3054
3055 static void __init set_ftrace_early_filters(void)
3056 {
3057         if (ftrace_filter_buf[0])
3058                 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
3059         if (ftrace_notrace_buf[0])
3060                 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
3061 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3062         if (ftrace_graph_buf[0])
3063                 set_ftrace_early_graph(ftrace_graph_buf);
3064 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3065 }
3066
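/*
 * ftrace_regex_release - commit the filter on the final close
 *
 * Applies any buffer that was written without a trailing newline, then
 * moves the iterator's private hash into the ops' filter/notrace hash
 * and updates the call sites if the ops is enabled.
 */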
3067 static int
3068 ftrace_regex_release(struct inode *inode, struct file *file)
3069 {
3070         struct seq_file *m = (struct seq_file *)file->private_data;
3071         struct ftrace_iterator *iter;
3072         struct ftrace_hash **orig_hash;
3073         struct trace_parser *parser;
3074         int filter_hash;
3075         int ret;
3076
3077         mutex_lock(&ftrace_regex_lock);
3078         if (file->f_mode & FMODE_READ) {
3079                 iter = m->private;
3080
3081                 seq_release(inode, file);
3082         } else
3083                 iter = file->private_data;
3084
3085         parser = &iter->parser;
3086         if (trace_parser_loaded(parser)) {
3087                 parser->buffer[parser->idx] = 0;
3088                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3089         }
3090
3091         trace_parser_put(parser);
3092
3093         if (file->f_mode & FMODE_WRITE) {
3094                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3095
3096                 if (filter_hash)
3097                         orig_hash = &iter->ops->filter_hash;
3098                 else
3099                         orig_hash = &iter->ops->notrace_hash;
3100
3101                 mutex_lock(&ftrace_lock);
3102                 ret = ftrace_hash_move(iter->ops, filter_hash,
3103                                        orig_hash, iter->hash);
3104                 if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
3105                     && ftrace_enabled)
3106                         ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3107
3108                 mutex_unlock(&ftrace_lock);
3109         }
3110         free_ftrace_hash(iter->hash);
3111         kfree(iter);
3112
3113         mutex_unlock(&ftrace_regex_lock);
3114         return 0;
3115 }
3116
3117 static const struct file_operations ftrace_avail_fops = {
3118         .open = ftrace_avail_open,
3119         .read = seq_read,
3120         .llseek = seq_lseek,
3121         .release = seq_release_private,
3122 };
3123
3124 static const struct file_operations ftrace_enabled_fops = {
3125         .open = ftrace_enabled_open,
3126         .read = seq_read,
3127         .llseek = seq_lseek,
3128         .release = seq_release_private,
3129 };
3130
3131 static const struct file_operations ftrace_filter_fops = {
3132         .open = ftrace_filter_open,
3133         .read = seq_read,
3134         .write = ftrace_filter_write,
3135         .llseek = ftrace_regex_lseek,
3136         .release = ftrace_regex_release,
3137 };
3138
3139 static const struct file_operations ftrace_notrace_fops = {
3140         .open = ftrace_notrace_open,
3141         .read = seq_read,
3142         .write = ftrace_notrace_write,
3143         .llseek = ftrace_regex_lseek,
3144         .release = ftrace_regex_release,
3145 };
3146
3147 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3148
3149 static DEFINE_MUTEX(graph_lock);
3150
3151 int ftrace_graph_count;
3152 int ftrace_graph_filter_enabled;
3153 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3154
3155 static void *
3156 __g_next(struct seq_file *m, loff_t *pos)
3157 {
3158         if (*pos >= ftrace_graph_count)
3159                 return NULL;
3160         return &ftrace_graph_funcs[*pos];
3161 }
3162
3163 static void *
3164 g_next(struct seq_file *m, void *v, loff_t *pos)
3165 {
3166         (*pos)++;
3167         return __g_next(m, pos);
3168 }
3169
3170 static void *g_start(struct seq_file *m, loff_t *pos)
3171 {
3172         mutex_lock(&graph_lock);
3173
3174         /* Nothing filtered; tell g_show to print that all functions are enabled */
3175         if (!ftrace_graph_filter_enabled && !*pos)
3176                 return (void *)1;
3177
3178         return __g_next(m, pos);
3179 }
3180
3181 static void g_stop(struct seq_file *m, void *p)
3182 {
3183         mutex_unlock(&graph_lock);
3184 }
3185
3186 static int g_show(struct seq_file *m, void *v)
3187 {
3188         unsigned long *ptr = v;
3189
3190         if (!ptr)
3191                 return 0;
3192
3193         if (ptr == (unsigned long *)1) {
3194                 seq_printf(m, "#### all functions enabled ####\n");
3195                 return 0;
3196         }
3197
3198         seq_printf(m, "%ps\n", (void *)*ptr);
3199
3200         return 0;
3201 }
3202
3203 static const struct seq_operations ftrace_graph_seq_ops = {
3204         .start = g_start,
3205         .next = g_next,
3206         .stop = g_stop,
3207         .show = g_show,
3208 };
3209
3210 static int
3211 ftrace_graph_open(struct inode *inode, struct file *file)
3212 {
3213         int ret = 0;
3214
3215         if (unlikely(ftrace_disabled))
3216                 return -ENODEV;
3217
3218         mutex_lock(&graph_lock);
3219         if ((file->f_mode & FMODE_WRITE) &&
3220             (file->f_flags & O_TRUNC)) {
3221                 ftrace_graph_filter_enabled = 0;
3222                 ftrace_graph_count = 0;
3223                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3224         }
3225         mutex_unlock(&graph_lock);
3226
3227         if (file->f_mode & FMODE_READ)
3228                 ret = seq_open(file, &ftrace_graph_seq_ops);
3229
3230         return ret;
3231 }
3232
3233 static int
3234 ftrace_graph_release(struct inode *inode, struct file *file)
3235 {
3236         if (file->f_mode & FMODE_READ)
3237                 seq_release(inode, file);
3238         return 0;
3239 }
3240
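/*
 * ftrace_set_func - add (or, for a '!' glob, remove) all matching
 * functions in the set_graph_function array.  Returns -EBUSY when the
 * array is already full and -EINVAL when nothing matched.
 */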
3241 static int
3242 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3243 {
3244         struct dyn_ftrace *rec;
3245         struct ftrace_page *pg;
3246         int search_len;
3247         int fail = 1;
3248         int type, not;
3249         char *search;
3250         bool exists;
3251         int i;
3252
3253         /* decode regex */
3254         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3255         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3256                 return -EBUSY;
3257
3258         search_len = strlen(search);
3259
3260         mutex_lock(&ftrace_lock);
3261
3262         if (unlikely(ftrace_disabled)) {
3263                 mutex_unlock(&ftrace_lock);
3264                 return -ENODEV;
3265         }
3266
3267         do_for_each_ftrace_rec(pg, rec) {
3268
3269                 if (rec->flags & FTRACE_FL_FREE)
3270                         continue;
3271
3272                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3273                         /* if it is in the array */
3274                         exists = false;
3275                         for (i = 0; i < *idx; i++) {
3276                                 if (array[i] == rec->ip) {
3277                                         exists = true;
3278                                         break;
3279                                 }
3280                         }
3281
3282                         if (!not) {
3283                                 fail = 0;
3284                                 if (!exists) {
3285                                         array[(*idx)++] = rec->ip;
3286                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3287                                                 goto out;
3288                                 }
3289                         } else {
3290                                 if (exists) {
3291                                         array[i] = array[--(*idx)];
3292                                         array[*idx] = 0;
3293                                         fail = 0;
3294                                 }
3295                         }
3296                 }
3297         } while_for_each_ftrace_rec();
3298 out:
3299         mutex_unlock(&ftrace_lock);
3300
3301         if (fail)
3302                 return -EINVAL;
3303
3304         ftrace_graph_filter_enabled = 1;
3305         return 0;
3306 }
3307
3308 static ssize_t
3309 ftrace_graph_write(struct file *file, const char __user *ubuf,
3310                    size_t cnt, loff_t *ppos)
3311 {
3312         struct trace_parser parser;
3313         ssize_t read, ret;
3314
3315         if (!cnt)
3316                 return 0;
3317
3318         mutex_lock(&graph_lock);
3319
3320         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3321                 ret = -ENOMEM;
3322                 goto out_unlock;
3323         }
3324
3325         read = trace_get_user(&parser, ubuf, cnt, ppos);
3326
3327         if (read >= 0 && trace_parser_loaded(&parser)) {
3328                 parser.buffer[parser.idx] = 0;
3329
3330                 /* we allow only one expression at a time */
3331                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3332                                         parser.buffer);
3333                 if (ret)
3334                         goto out_free;
3335         }
3336
3337         ret = read;
3338
3339 out_free:
3340         trace_parser_put(&parser);
3341 out_unlock:
3342         mutex_unlock(&graph_lock);
3343
3344         return ret;
3345 }
3346
3347 static const struct file_operations ftrace_graph_fops = {
3348         .open           = ftrace_graph_open,
3349         .read           = seq_read,
3350         .write          = ftrace_graph_write,
3351         .release        = ftrace_graph_release,
3352         .llseek         = seq_lseek,
3353 };
3354 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3355
3356 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3357 {
3358
3359         trace_create_file("available_filter_functions", 0444,
3360                         d_tracer, NULL, &ftrace_avail_fops);
3361
3362         trace_create_file("enabled_functions", 0444,
3363                         d_tracer, NULL, &ftrace_enabled_fops);
3364
3365         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3366                         NULL, &ftrace_filter_fops);
3367
3368         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3369                                     NULL, &ftrace_notrace_fops);
3370
3371 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3372         trace_create_file("set_graph_function", 0444, d_tracer,
3373                                     NULL,
3374                                     &ftrace_graph_fops);
3375 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3376
3377         return 0;
3378 }
3379
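/*
 * ftrace_process_locs - record the mcount call sites listed between
 * @start and @end (the __mcount_loc section for the core kernel, or a
 * module's ftrace_callsites) and convert them to NOPs via
 * ftrace_update_code().
 */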
3380 static int ftrace_process_locs(struct module *mod,
3381                                unsigned long *start,
3382                                unsigned long *end)
3383 {
3384         unsigned long *p;
3385         unsigned long addr;
3386         unsigned long flags = 0; /* Shut up gcc */
3387
3388         mutex_lock(&ftrace_lock);
3389         p = start;
3390         while (p < end) {
3391                 addr = ftrace_call_adjust(*p++);
3392                 /*
3393                  * Some architecture linkers will pad between
3394                  * the different mcount_loc sections of different
3395                  * object files to satisfy alignments.
3396                  * Skip any NULL pointers.
3397                  */
3398                 if (!addr)
3399                         continue;
3400                 ftrace_record_ip(addr);
3401         }
3402
3403         /*
3404          * We only need to disable interrupts on start up
3405          * because we are modifying code that an interrupt
3406          * may execute, and the modification is not atomic.
3407          * But for modules, nothing runs the code we modify
3408          * until we are finished with it, and there's no
3409          * reason to cause large interrupt latencies while we do it.
3410          */
3411         if (!mod)
3412                 local_irq_save(flags);
3413         ftrace_update_code(mod);
3414         if (!mod)
3415                 local_irq_restore(flags);
3416         mutex_unlock(&ftrace_lock);
3417
3418         return 0;
3419 }
3420
3421 #ifdef CONFIG_MODULES
3422 void ftrace_release_mod(struct module *mod)
3423 {
3424         struct dyn_ftrace *rec;
3425         struct ftrace_page *pg;
3426
3427         mutex_lock(&ftrace_lock);
3428
3429         if (ftrace_disabled)
3430                 goto out_unlock;
3431
3432         do_for_each_ftrace_rec(pg, rec) {
3433                 if (within_module_core(rec->ip, mod)) {
3434                         /*
3435                          * rec->ip is changed in ftrace_free_rec().
3436                          * It should not be between s and e if the record was freed.
3437                          */
3438                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3439                         ftrace_free_rec(rec);
3440                 }
3441         } while_for_each_ftrace_rec();
3442  out_unlock:
3443         mutex_unlock(&ftrace_lock);
3444 }
3445
3446 static void ftrace_init_module(struct module *mod,
3447                                unsigned long *start, unsigned long *end)
3448 {
3449         if (ftrace_disabled || start == end)
3450                 return;
3451         ftrace_process_locs(mod, start, end);
3452 }
3453
3454 static int ftrace_module_notify(struct notifier_block *self,
3455                                 unsigned long val, void *data)
3456 {
3457         struct module *mod = data;
3458
3459         switch (val) {
3460         case MODULE_STATE_COMING:
3461                 ftrace_init_module(mod, mod->ftrace_callsites,
3462                                    mod->ftrace_callsites +
3463                                    mod->num_ftrace_callsites);
3464                 break;
3465         case MODULE_STATE_GOING:
3466                 ftrace_release_mod(mod);
3467                 break;
3468         }
3469
3470         return 0;
3471 }
3472 #else
3473 static int ftrace_module_notify(struct notifier_block *self,
3474                                 unsigned long val, void *data)
3475 {
3476         return 0;
3477 }
3478 #endif /* CONFIG_MODULES */
3479
3480 struct notifier_block ftrace_module_nb = {
3481         .notifier_call = ftrace_module_notify,
3482         .priority = 0,
3483 };
3484
3485 extern unsigned long __start_mcount_loc[];
3486 extern unsigned long __stop_mcount_loc[];
3487
3488 void __init ftrace_init(void)
3489 {
3490         unsigned long count, addr, flags;
3491         int ret;
3492
3493         /* Keep the ftrace pointer to the stub */
3494         addr = (unsigned long)ftrace_stub;
3495
3496         local_irq_save(flags);
3497         ftrace_dyn_arch_init(&addr);
3498         local_irq_restore(flags);
3499
3500         /* ftrace_dyn_arch_init places the return code in addr */
3501         if (addr)
3502                 goto failed;
3503
3504         count = __stop_mcount_loc - __start_mcount_loc;
3505
3506         ret = ftrace_dyn_table_alloc(count);
3507         if (ret)
3508                 goto failed;
3509
3510         last_ftrace_enabled = ftrace_enabled = 1;
3511
3512         ret = ftrace_process_locs(NULL,
3513                                   __start_mcount_loc,
3514                                   __stop_mcount_loc);
3515
3516         ret = register_module_notifier(&ftrace_module_nb);
3517         if (ret)
3518                 pr_warning("Failed to register trace ftrace module notifier\n");
3519
3520         set_ftrace_early_filters();
3521
3522         return;
3523  failed:
3524         ftrace_disabled = 1;
3525 }
3526
3527 #else
3528
3529 static struct ftrace_ops global_ops = {
3530         .func                   = ftrace_stub,
3531 };
3532
3533 static int __init ftrace_nodyn_init(void)
3534 {
3535         ftrace_enabled = 1;
3536         return 0;
3537 }
3538 device_initcall(ftrace_nodyn_init);
3539
3540 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3541 static inline void ftrace_startup_enable(int command) { }
3542 /* Keep as macros so we do not need to define the commands */
3543 # define ftrace_startup(ops, command)                   \
3544         ({                                              \
3545                 (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
3546                 0;                                      \
3547         })
3548 # define ftrace_shutdown(ops, command)  do { } while (0)
3549 # define ftrace_startup_sysctl()        do { } while (0)
3550 # define ftrace_shutdown_sysctl()       do { } while (0)
3551
3552 static inline int
3553 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
3554 {
3555         return 1;
3556 }
3557
3558 #endif /* CONFIG_DYNAMIC_FTRACE */
3559
3560 static void
3561 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3562 {
3563         struct ftrace_ops *op;
3564
3565         if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3566                 return;
3567
3568         trace_recursion_set(TRACE_INTERNAL_BIT);
3569         /*
3570          * Some of the ops may be dynamically allocated;
3571          * they must be freed after a synchronize_sched().
3572          */
3573         preempt_disable_notrace();
3574         op = rcu_dereference_raw(ftrace_ops_list);
3575         while (op != &ftrace_list_end) {
3576                 if (ftrace_ops_test(op, ip))
3577                         op->func(ip, parent_ip);
3578                 op = rcu_dereference_raw(op->next);
3579         }
3580         preempt_enable_notrace();
3581         trace_recursion_clear(TRACE_INTERNAL_BIT);
3582 }
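
/*
 * Sketch of the lifetime rule the comment above relies on (illustrative,
 * not part of this file; dyn_ops stands for a caller-allocated
 * ftrace_ops): the list walk only disables preemption, so a dynamically
 * allocated ops may be freed only after a full synchronize_sched()
 * following its removal from the list, e.g.:
 *
 *     unregister_ftrace_function(dyn_ops);
 *     synchronize_sched();     // every walker has left the list
 *     kfree(dyn_ops);
 */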
3583
3584 static void clear_ftrace_swapper(void)
3585 {
3586         struct task_struct *p;
3587         int cpu;
3588
3589         get_online_cpus();
3590         for_each_online_cpu(cpu) {
3591                 p = idle_task(cpu);
3592                 clear_tsk_trace_trace(p);
3593         }
3594         put_online_cpus();
3595 }
3596
3597 static void set_ftrace_swapper(void)
3598 {
3599         struct task_struct *p;
3600         int cpu;
3601
3602         get_online_cpus();
3603         for_each_online_cpu(cpu) {
3604                 p = idle_task(cpu);
3605                 set_tsk_trace_trace(p);
3606         }
3607         put_online_cpus();
3608 }
3609
3610 static void clear_ftrace_pid(struct pid *pid)
3611 {
3612         struct task_struct *p;
3613
3614         rcu_read_lock();
3615         do_each_pid_task(pid, PIDTYPE_PID, p) {
3616                 clear_tsk_trace_trace(p);
3617         } while_each_pid_task(pid, PIDTYPE_PID, p);
3618         rcu_read_unlock();
3619
3620         put_pid(pid);
3621 }
3622
3623 static void set_ftrace_pid(struct pid *pid)
3624 {
3625         struct task_struct *p;
3626
3627         rcu_read_lock();
3628         do_each_pid_task(pid, PIDTYPE_PID, p) {
3629                 set_tsk_trace_trace(p);
3630         } while_each_pid_task(pid, PIDTYPE_PID, p);
3631         rcu_read_unlock();
3632 }
3633
3634 static void clear_ftrace_pid_task(struct pid *pid)
3635 {
3636         if (pid == ftrace_swapper_pid)
3637                 clear_ftrace_swapper();
3638         else
3639                 clear_ftrace_pid(pid);
3640 }
3641
3642 static void set_ftrace_pid_task(struct pid *pid)
3643 {
3644         if (pid == ftrace_swapper_pid)
3645                 set_ftrace_swapper();
3646         else
3647                 set_ftrace_pid(pid);
3648 }
3649
3650 static int ftrace_pid_add(int p)
3651 {
3652         struct pid *pid;
3653         struct ftrace_pid *fpid;
3654         int ret = -EINVAL;
3655
3656         mutex_lock(&ftrace_lock);
3657
3658         if (!p)
3659                 pid = ftrace_swapper_pid;
3660         else
3661                 pid = find_get_pid(p);
3662
3663         if (!pid)
3664                 goto out;
3665
3666         ret = 0;
3667
3668         list_for_each_entry(fpid, &ftrace_pids, list)
3669                 if (fpid->pid == pid)
3670                         goto out_put;
3671
3672         ret = -ENOMEM;
3673
3674         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3675         if (!fpid)
3676                 goto out_put;
3677
3678         list_add(&fpid->list, &ftrace_pids);
3679         fpid->pid = pid;
3680
3681         set_ftrace_pid_task(pid);
3682
3683         ftrace_update_pid_func();
3684         ftrace_startup_enable(0);
3685
3686         mutex_unlock(&ftrace_lock);
3687         return 0;
3688
3689 out_put:
3690         if (pid != ftrace_swapper_pid)
3691                 put_pid(pid);
3692
3693 out:
3694         mutex_unlock(&ftrace_lock);
3695         return ret;
3696 }
3697
3698 static void ftrace_pid_reset(void)
3699 {
3700         struct ftrace_pid *fpid, *safe;
3701
3702         mutex_lock(&ftrace_lock);
3703         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3704                 struct pid *pid = fpid->pid;
3705
3706                 clear_ftrace_pid_task(pid);
3707
3708                 list_del(&fpid->list);
3709                 kfree(fpid);
3710         }
3711
3712         ftrace_update_pid_func();
3713         ftrace_startup_enable(0);
3714
3715         mutex_unlock(&ftrace_lock);
3716 }
3717
3718 static void *fpid_start(struct seq_file *m, loff_t *pos)
3719 {
3720         mutex_lock(&ftrace_lock);
3721
3722         if (list_empty(&ftrace_pids) && (!*pos))
3723                 return (void *) 1;
3724
3725         return seq_list_start(&ftrace_pids, *pos);
3726 }
3727
3728 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3729 {
3730         if (v == (void *)1)
3731                 return NULL;
3732
3733         return seq_list_next(v, &ftrace_pids, pos);
3734 }
3735
3736 static void fpid_stop(struct seq_file *m, void *p)
3737 {
3738         mutex_unlock(&ftrace_lock);
3739 }
3740
3741 static int fpid_show(struct seq_file *m, void *v)
3742 {
3743         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3744
3745         if (v == (void *)1) {
3746                 seq_printf(m, "no pid\n");
3747                 return 0;
3748         }
3749
3750         if (fpid->pid == ftrace_swapper_pid)
3751                 seq_printf(m, "swapper tasks\n");
3752         else
3753                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3754
3755         return 0;
3756 }
3757
3758 static const struct seq_operations ftrace_pid_sops = {
3759         .start = fpid_start,
3760         .next = fpid_next,
3761         .stop = fpid_stop,
3762         .show = fpid_show,
3763 };
3764
3765 static int
3766 ftrace_pid_open(struct inode *inode, struct file *file)
3767 {
3768         int ret = 0;
3769
3770         if ((file->f_mode & FMODE_WRITE) &&
3771             (file->f_flags & O_TRUNC))
3772                 ftrace_pid_reset();
3773
3774         if (file->f_mode & FMODE_READ)
3775                 ret = seq_open(file, &ftrace_pid_sops);
3776
3777         return ret;
3778 }
3779
3780 static ssize_t
3781 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3782                    size_t cnt, loff_t *ppos)
3783 {
3784         char buf[64], *tmp;
3785         long val;
3786         int ret;
3787
3788         if (cnt >= sizeof(buf))
3789                 return -EINVAL;
3790
3791         if (copy_from_user(&buf, ubuf, cnt))
3792                 return -EFAULT;
3793
3794         buf[cnt] = 0;
3795
3796         /*
3797          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3798          * to clear the filter quietly.
3799          */
3800         tmp = strstrip(buf);
3801         if (strlen(tmp) == 0)
3802                 return 1;
3803
3804         ret = strict_strtol(tmp, 10, &val);
3805         if (ret < 0)
3806                 return ret;
3807
3808         ret = ftrace_pid_add(val);
3809
3810         return ret ? ret : cnt;
3811 }
3812
3813 static int
3814 ftrace_pid_release(struct inode *inode, struct file *file)
3815 {
3816         if (file->f_mode & FMODE_READ)
3817                 seq_release(inode, file);
3818
3819         return 0;
3820 }
3821
3822 static const struct file_operations ftrace_pid_fops = {
3823         .open           = ftrace_pid_open,
3824         .write          = ftrace_pid_write,
3825         .read           = seq_read,
3826         .llseek         = seq_lseek,
3827         .release        = ftrace_pid_release,
3828 };
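
/*
 * Usage sketch for set_ftrace_pid (illustrative, not part of this file;
 * paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *     echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 1234
 *     echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   # trace the idle (swapper) tasks
 *     echo      > /sys/kernel/debug/tracing/set_ftrace_pid   # clear the filter
 *     cat         /sys/kernel/debug/tracing/set_ftrace_pid   # list the current pids
 */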
3829
3830 static __init int ftrace_init_debugfs(void)
3831 {
3832         struct dentry *d_tracer;
3833
3834         d_tracer = tracing_init_dentry();
3835         if (!d_tracer)
3836                 return 0;
3837
3838         ftrace_init_dyn_debugfs(d_tracer);
3839
3840         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3841                             NULL, &ftrace_pid_fops);
3842
3843         ftrace_profile_debugfs(d_tracer);
3844
3845         return 0;
3846 }
3847 fs_initcall(ftrace_init_debugfs);
3848
3849 /**
3850  * ftrace_kill - kill ftrace
3851  *
3852  * This function should be used by panic code. It stops ftrace
3853  * but in a not so nice way: tracing is disabled immediately and
3854  * no clean shutdown of the registered callbacks is attempted.
3855  */
3856 void ftrace_kill(void)
3857 {
3858         ftrace_disabled = 1;
3859         ftrace_enabled = 0;
3860         clear_ftrace_function();
3861 }
3862
3863 /**
3864  * register_ftrace_function - register a function for profiling
3865  * @ops - ops structure that holds the function for profiling.
3866  *
3867  * Register a function to be called by all functions in the
3868  * kernel.
3869  *
3870  * Note: @ops->func and all the functions it calls must be labeled
3871  *       with "notrace", otherwise it will go into a
3872  *       recursive loop.
3873  */
3874 int register_ftrace_function(struct ftrace_ops *ops)
3875 {
3876         int ret = -1;
3877
3878         mutex_lock(&ftrace_lock);
3879
3880         if (unlikely(ftrace_disabled))
3881                 goto out_unlock;
3882
3883         ret = __register_ftrace_function(ops);
3884         if (!ret)
3885                 ret = ftrace_startup(ops, 0);
3886
3887
3888  out_unlock:
3889         mutex_unlock(&ftrace_lock);
3890         return ret;
3891 }
3892 EXPORT_SYMBOL_GPL(register_ftrace_function);
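
/*
 * Minimal registration sketch (illustrative, not part of this file; the
 * callback and ops names are made up):
 *
 *     static void notrace my_trace_func(unsigned long ip,
 *                                       unsigned long parent_ip)
 *     {
 *             // runs on every traced function entry; must stay notrace
 *     }
 *
 *     static struct ftrace_ops my_ops = {
 *             .func = my_trace_func,
 *     };
 *
 *     register_ftrace_function(&my_ops);
 *     ...
 *     unregister_ftrace_function(&my_ops);
 */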
3893
3894 /**
3895  * unregister_ftrace_function - unregister a function for profiling.
3896  * @ops - ops structure that holds the function to unregister
3897  *
3898  * Unregister a function that was added to be called by ftrace profiling.
3899  */
3900 int unregister_ftrace_function(struct ftrace_ops *ops)
3901 {
3902         int ret;
3903
3904         mutex_lock(&ftrace_lock);
3905         ret = __unregister_ftrace_function(ops);
3906         if (!ret)
3907                 ftrace_shutdown(ops, 0);
3908         mutex_unlock(&ftrace_lock);
3909
3910         return ret;
3911 }
3912 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3913
3914 int
3915 ftrace_enable_sysctl(struct ctl_table *table, int write,
3916                      void __user *buffer, size_t *lenp,
3917                      loff_t *ppos)
3918 {
3919         int ret = -ENODEV;
3920
3921         mutex_lock(&ftrace_lock);
3922
3923         if (unlikely(ftrace_disabled))
3924                 goto out;
3925
3926         ret = proc_dointvec(table, write, buffer, lenp, ppos);
3927
3928         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3929                 goto out;
3930
3931         last_ftrace_enabled = !!ftrace_enabled;
3932
3933         if (ftrace_enabled) {
3934
3935                 ftrace_startup_sysctl();
3936
3937                 /* we are starting ftrace again */
3938                 if (ftrace_ops_list != &ftrace_list_end) {
3939                         if (ftrace_ops_list->next == &ftrace_list_end)
3940                                 ftrace_trace_function = ftrace_ops_list->func;
3941                         else
3942                                 ftrace_trace_function = ftrace_ops_list_func;
3943                 }
3944
3945         } else {
3946                 /* stopping ftrace calls (just send to ftrace_stub) */
3947                 ftrace_trace_function = ftrace_stub;
3948
3949                 ftrace_shutdown_sysctl();
3950         }
3951
3952  out:
3953         mutex_unlock(&ftrace_lock);
3954         return ret;
3955 }
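
/*
 * Usage sketch (illustrative): this handler backs the
 * kernel.ftrace_enabled sysctl, so e.g.
 *
 *     echo 0 > /proc/sys/kernel/ftrace_enabled   # point everything at ftrace_stub
 *     echo 1 > /proc/sys/kernel/ftrace_enabled   # restore the registered callbacks
 */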
3956
3957 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3958
3959 static int ftrace_graph_active;
3960 static struct notifier_block ftrace_suspend_notifier;
3961
3962 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3963 {
3964         return 0;
3965 }
3966
3967 /* The callbacks that hook a function */
3968 trace_func_graph_ret_t ftrace_graph_return =
3969                         (trace_func_graph_ret_t)ftrace_stub;
3970 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3971
3972 /* Try to assign a return stack array to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3973 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3974 {
3975         int i;
3976         int ret = 0;
3977         unsigned long flags;
3978         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3979         struct task_struct *g, *t;
3980
3981         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3982                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3983                                         * sizeof(struct ftrace_ret_stack),
3984                                         GFP_KERNEL);
3985                 if (!ret_stack_list[i]) {
3986                         start = 0;
3987                         end = i;
3988                         ret = -ENOMEM;
3989                         goto free;
3990                 }
3991         }
3992
3993         read_lock_irqsave(&tasklist_lock, flags);
3994         do_each_thread(g, t) {
3995                 if (start == end) {
3996                         ret = -EAGAIN;
3997                         goto unlock;
3998                 }
3999
4000                 if (t->ret_stack == NULL) {
4001                         atomic_set(&t->tracing_graph_pause, 0);
4002                         atomic_set(&t->trace_overrun, 0);
4003                         t->curr_ret_stack = -1;
4004                         /* Make sure the tasks see the -1 first: */
4005                         smp_wmb();
4006                         t->ret_stack = ret_stack_list[start++];
4007                 }
4008         } while_each_thread(g, t);
4009
4010 unlock:
4011         read_unlock_irqrestore(&tasklist_lock, flags);
4012 free:
4013         for (i = start; i < end; i++)
4014                 kfree(ret_stack_list[i]);
4015         return ret;
4016 }
4017
4018 static void
4019 ftrace_graph_probe_sched_switch(void *ignore,
4020                         struct task_struct *prev, struct task_struct *next)
4021 {
4022         unsigned long long timestamp;
4023         int index;
4024
4025         /*
4026          * Does the user want to count the time a function was asleep?
4027          * If so, do not update the time stamps.
4028          */
4029         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4030                 return;
4031
4032         timestamp = trace_clock_local();
4033
4034         prev->ftrace_timestamp = timestamp;
4035
4036         /* only process tasks that we timestamped */
4037         if (!next->ftrace_timestamp)
4038                 return;
4039
4040         /*
4041          * Update all the counters in next to make up for the
4042          * time next was sleeping.
4043          */
4044         timestamp -= next->ftrace_timestamp;
4045
4046         for (index = next->curr_ret_stack; index >= 0; index--)
4047                 next->ret_stack[index].calltime += timestamp;
4048 }
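
/*
 * Worked example (illustrative): with TRACE_ITER_SLEEP_TIME clear, if
 * 'next' was off the CPU for 2 ms, the loop above adds 2 ms to every
 * pending ret_stack[].calltime, so that sleep time is not charged to
 * the durations (rettime - calltime) later reported for those calls.
 */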
4049
4050 /* Allocate a return stack for each task */
4051 static int start_graph_tracing(void)
4052 {
4053         struct ftrace_ret_stack **ret_stack_list;
4054         int ret, cpu;
4055
4056         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4057                                 sizeof(struct ftrace_ret_stack *),
4058                                 GFP_KERNEL);
4059
4060         if (!ret_stack_list)
4061                 return -ENOMEM;
4062
4063         /* The idle task's ret_stack is allocated once per cpu and never freed */
4064         for_each_online_cpu(cpu) {
4065                 if (!idle_task(cpu)->ret_stack)
4066                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4067         }
4068
4069         do {
4070                 ret = alloc_retstack_tasklist(ret_stack_list);
4071         } while (ret == -EAGAIN);
4072
4073         if (!ret) {
4074                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4075                 if (ret)
4076                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4077                                 " probe to kernel_sched_switch\n");
4078         }
4079
4080         kfree(ret_stack_list);
4081         return ret;
4082 }
4083
4084 /*
4085  * Hibernation protection.
4086  * The state of the current task is too unstable during
4087  * suspend/restore to disk. We want to protect against that.
4088  */
4089 static int
4090 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4091                                                         void *unused)
4092 {
4093         switch (state) {
4094         case PM_HIBERNATION_PREPARE:
4095                 pause_graph_tracing();
4096                 break;
4097
4098         case PM_POST_HIBERNATION:
4099                 unpause_graph_tracing();
4100                 break;
4101         }
4102         return NOTIFY_DONE;
4103 }
4104
4105 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4106                         trace_func_graph_ent_t entryfunc)
4107 {
4108         int ret = 0;
4109
4110         mutex_lock(&ftrace_lock);
4111
4112         /* we currently allow only one tracer registered at a time */
4113         if (ftrace_graph_active) {
4114                 ret = -EBUSY;
4115                 goto out;
4116         }
4117
4118         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4119         register_pm_notifier(&ftrace_suspend_notifier);
4120
4121         ftrace_graph_active++;
4122         ret = start_graph_tracing();
4123         if (ret) {
4124                 ftrace_graph_active--;
4125                 goto out;
4126         }
4127
4128         ftrace_graph_return = retfunc;
4129         ftrace_graph_entry = entryfunc;
4130
4131         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4132
4133 out:
4134         mutex_unlock(&ftrace_lock);
4135         return ret;
4136 }
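
/*
 * Minimal graph-tracer registration sketch (illustrative, not part of
 * this file; the callback names are made up):
 *
 *     static int my_graph_entry(struct ftrace_graph_ent *trace)
 *     {
 *             return 1;        // nonzero: also trace this function's return
 *     }
 *
 *     static void my_graph_return(struct ftrace_graph_ret *trace)
 *     {
 *             // trace->calltime and trace->rettime bracket the call
 *     }
 *
 *     register_ftrace_graph(my_graph_return, my_graph_entry);
 *     ...
 *     unregister_ftrace_graph();
 */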
4137
4138 void unregister_ftrace_graph(void)
4139 {
4140         mutex_lock(&ftrace_lock);
4141
4142         if (unlikely(!ftrace_graph_active))
4143                 goto out;
4144
4145         ftrace_graph_active--;
4146         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4147         ftrace_graph_entry = ftrace_graph_entry_stub;
4148         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4149         unregister_pm_notifier(&ftrace_suspend_notifier);
4150         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4151
4152  out:
4153         mutex_unlock(&ftrace_lock);
4154 }
4155
4156 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4157
4158 static void
4159 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4160 {
4161         atomic_set(&t->tracing_graph_pause, 0);
4162         atomic_set(&t->trace_overrun, 0);
4163         t->ftrace_timestamp = 0;
4164         /* make curr_ret_stack visible before we add the ret_stack */
4165         smp_wmb();
4166         t->ret_stack = ret_stack;
4167 }
4168
4169 /*
4170  * Allocate a return stack for the idle task. This may be the first
4171  * time through, or the task may be coming online via CPU hotplug.
4172  */
4173 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4174 {
4175         t->curr_ret_stack = -1;
4176         /*
4177          * The idle task has no parent; it either has its own
4178          * stack or no stack at all.
4179          */
4180         if (t->ret_stack)
4181                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4182
4183         if (ftrace_graph_active) {
4184                 struct ftrace_ret_stack *ret_stack;
4185
4186                 ret_stack = per_cpu(idle_ret_stack, cpu);
4187                 if (!ret_stack) {
4188                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4189                                             * sizeof(struct ftrace_ret_stack),
4190                                             GFP_KERNEL);
4191                         if (!ret_stack)
4192                                 return;
4193                         per_cpu(idle_ret_stack, cpu) = ret_stack;
4194                 }
4195                 graph_init_task(t, ret_stack);
4196         }
4197 }
4198
4199 /* Allocate a return stack for newly created task */
4200 void ftrace_graph_init_task(struct task_struct *t)
4201 {
4202         /* Make sure we do not use the parent ret_stack */
4203         t->ret_stack = NULL;
4204         t->curr_ret_stack = -1;
4205
4206         if (ftrace_graph_active) {
4207                 struct ftrace_ret_stack *ret_stack;
4208
4209                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4210                                 * sizeof(struct ftrace_ret_stack),
4211                                 GFP_KERNEL);
4212                 if (!ret_stack)
4213                         return;
4214                 graph_init_task(t, ret_stack);
4215         }
4216 }
4217
4218 void ftrace_graph_exit_task(struct task_struct *t)
4219 {
4220         struct ftrace_ret_stack *ret_stack = t->ret_stack;
4221
4222         t->ret_stack = NULL;
4223         /* NULL must become visible to IRQs before we free it: */
4224         barrier();
4225
4226         kfree(ret_stack);
4227 }
4228
4229 void ftrace_graph_stop(void)
4230 {
4231         ftrace_stop();
4232 }
4233 #endif