48d3762b828a54c8abda0c064cba20869d09176f
[linux-2.6.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/setup.h>
36
37 #include "trace_output.h"
38 #include "trace_stat.h"
39
/*
 * FTRACE_WARN_ON(cond) - warn and kill ftrace on an anomaly.
 * Evaluates @cond once; if true, WARN_ON fires and ftrace_kill()
 * shuts ftrace down.  Expands to the value of @cond so it can be
 * used directly inside an if ().
 */
#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})
47
/* Like FTRACE_WARN_ON() but the warning is only printed once */
#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})
55
56 /* hash bits for specific function selection */
57 #define FTRACE_HASH_BITS 7
58 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
59 #define FTRACE_HASH_DEFAULT_BITS 10
60 #define FTRACE_HASH_MAX_BITS 12
61
62 /* ftrace_enabled is a method to turn ftrace on or off */
63 int ftrace_enabled __read_mostly;
64 static int last_ftrace_enabled;
65
66 /* Quick disabling of function tracer. */
67 int function_trace_stop;
68
69 /* List for set_ftrace_pid's pids. */
70 LIST_HEAD(ftrace_pids);
71 struct ftrace_pid {
72         struct list_head list;
73         struct pid *pid;
74 };
75
76 /*
77  * ftrace_disabled is set when an anomaly is discovered.
78  * ftrace_disabled is much stronger than ftrace_enabled.
79  */
80 static int ftrace_disabled __read_mostly;
81
82 static DEFINE_MUTEX(ftrace_lock);
83
84 static struct ftrace_ops ftrace_list_end __read_mostly = {
85         .func           = ftrace_stub,
86 };
87
88 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
89 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
90 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
91 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
92 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
93 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
94 static struct ftrace_ops global_ops;
95
96 static void
97 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
98
99 /*
100  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
101  * can use rcu_dereference_raw() is that elements removed from this list
102  * are simply leaked, so there is no need to interact with a grace-period
103  * mechanism.  The rcu_dereference_raw() calls are needed to handle
104  * concurrent insertions into the ftrace_global_list.
105  *
106  * Silly Alpha and silly pointer-speculation compiler optimizations!
107  */
108 static void ftrace_global_list_func(unsigned long ip,
109                                     unsigned long parent_ip)
110 {
111         struct ftrace_ops *op;
112
113         if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
114                 return;
115
116         trace_recursion_set(TRACE_GLOBAL_BIT);
117         op = rcu_dereference_raw(ftrace_global_list); /*see above*/
118         while (op != &ftrace_list_end) {
119                 op->func(ip, parent_ip);
120                 op = rcu_dereference_raw(op->next); /*see above*/
121         };
122         trace_recursion_clear(TRACE_GLOBAL_BIT);
123 }
124
125 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
126 {
127         if (!test_tsk_trace_trace(current))
128                 return;
129
130         ftrace_pid_function(ip, parent_ip);
131 }
132
133 static void set_ftrace_pid_function(ftrace_func_t func)
134 {
135         /* do not set ftrace_pid_function to itself! */
136         if (func != ftrace_pid_func)
137                 ftrace_pid_function = func;
138 }
139
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between this call and the last
 * in-flight callback finishing, since callers are not
 * synchronized against here.
 */
void clear_ftrace_function(void)
{
	/* point every dispatch hook back at the do-nothing stub */
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function_delay = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
153
154 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
155 /*
156  * For those archs that do not test ftrace_trace_stop in their
157  * mcount call site, we need to do it from C.
158  */
159 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
160 {
161         if (function_trace_stop)
162                 return;
163
164         __ftrace_trace_function(ip, parent_ip);
165 }
166 #endif
167
168 static void update_global_ops(void)
169 {
170         ftrace_func_t func;
171
172         /*
173          * If there's only one function registered, then call that
174          * function directly. Otherwise, we need to iterate over the
175          * registered callers.
176          */
177         if (ftrace_global_list == &ftrace_list_end ||
178             ftrace_global_list->next == &ftrace_list_end)
179                 func = ftrace_global_list->func;
180         else
181                 func = ftrace_global_list_func;
182
183         /* If we filter on pids, update to use the pid function */
184         if (!list_empty(&ftrace_pids)) {
185                 set_ftrace_pid_function(func);
186                 func = ftrace_pid_func;
187         }
188
189         global_ops.func = func;
190 }
191
/*
 * Recompute which function the mcount trampoline should call,
 * based on the current contents of ftrace_ops_list.
 */
static void update_ftrace_function(void)
{
	ftrace_func_t func;

	update_global_ops();

	/*
	 * If we are at the end of the list and this ops is
	 * not dynamic, then have the mcount trampoline call
	 * the function directly
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
		func = ftrace_ops_list->func;
	else
		func = ftrace_ops_list_func;

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
#ifdef CONFIG_DYNAMIC_FTRACE
	/* do not update till all functions have been modified */
	__ftrace_trace_function_delay = func;
#else
	__ftrace_trace_function = func;
#endif
	/* arch cannot test function_trace_stop; go through the C wrapper */
	ftrace_trace_function = ftrace_test_stop_func;
#endif
}
222
/*
 * Insert @ops at the head of @list.  Lockless readers may be walking
 * the list concurrently, so ops->next must be published (via the
 * rcu_assign_pointer() barrier) before the new head becomes visible.
 */
static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}
234
235 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
236 {
237         struct ftrace_ops **p;
238
239         /*
240          * If we are removing the last function, then simply point
241          * to the ftrace_stub.
242          */
243         if (*list == ops && ops->next == &ftrace_list_end) {
244                 *list = &ftrace_list_end;
245                 return 0;
246         }
247
248         for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
249                 if (*p == ops)
250                         break;
251
252         if (*p != ops)
253                 return -1;
254
255         *p = (*p)->next;
256         return 0;
257 }
258
/*
 * Add @ops to the appropriate callback list (global or regular).
 * Returns 0 on success, -ENODEV if ftrace is dead, -EBUSY if @ops
 * is already enabled, -EINVAL for the internal global_ops.
 * Caller is expected to hold ftrace_lock.
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ftrace_disabled)
		return -ENODEV;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

	/* ops outside core kernel data (e.g. in a module) may be freed */
	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		int first = ftrace_global_list == &ftrace_list_end;
		add_ftrace_ops(&ftrace_global_list, ops);
		ops->flags |= FTRACE_OPS_FL_ENABLED;
		/* first global ops: hook global_ops into the main list */
		if (first)
			add_ftrace_ops(&ftrace_ops_list, &global_ops);
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}
287
/*
 * Remove @ops from its callback list.  Mirrors
 * __register_ftrace_function(); same error conventions.
 */
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (ftrace_disabled)
		return -ENODEV;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (FTRACE_WARN_ON(ops == &global_ops))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ret = remove_ftrace_ops(&ftrace_global_list, ops);
		/* last global ops gone: unhook global_ops from the main list */
		if (!ret && ftrace_global_list == &ftrace_list_end)
			ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
		if (!ret)
			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	/*
	 * Dynamic ops may be freed, we must make sure that all
	 * callers are done before leaving this function.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
		synchronize_sched();

	return 0;
}
325
326 static void ftrace_update_pid_func(void)
327 {
328         /* Only do something if we are tracing something */
329         if (ftrace_trace_function == ftrace_stub)
330                 return;
331
332         update_ftrace_function();
333 }
334
335 #ifdef CONFIG_FUNCTION_PROFILER
/* One profiled function: hit count and (with graph tracer) timing sums */
struct ftrace_profile {
	struct hlist_node		node;		/* hash chain entry */
	unsigned long			ip;		/* function address */
	unsigned long			counter;	/* number of hits */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;		/* total time, ns */
	unsigned long long		time_squared;	/* sum of time^2, for stddev */
#endif
};
345
/* A page worth of ftrace_profile records; pages form a singly linked list */
struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;	/* next free slot in records[] */
	struct ftrace_profile		records[];
};

/* Per-cpu profiling state */
struct ftrace_profile_stat {
	atomic_t			disabled;	/* recursion guard for allocation */
	struct hlist_head		*hash;		/* ip -> ftrace_profile lookup */
	struct ftrace_profile_page	*pages;		/* page currently being filled */
	struct ftrace_profile_page	*start;		/* head of the page list */
	struct tracer_stat		stat;		/* stat-tracer registration */
};
359
360 #define PROFILE_RECORDS_SIZE                                            \
361         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
362
363 #define PROFILES_PER_PAGE                                       \
364         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
365
366 static int ftrace_profile_bits __read_mostly;
367 static int ftrace_profile_enabled __read_mostly;
368
369 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
370 static DEFINE_MUTEX(ftrace_profile_lock);
371
372 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
373
374 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
375
/*
 * Advance to the next record in the page list; @idx == 0 means
 * return the current record itself (used by function_stat_start).
 * Returns NULL when the pages are exhausted.
 */
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	/* records live in page-aligned arrays; mask back to the page header */
	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		/*
		 * NOTE(review): only the first record of a fresh page is
		 * checked for counter == 0 here — presumably unused records
		 * only occur at a page boundary; verify against the writers.
		 */
		if (!rec->counter)
			goto again;
	}

	return rec;
}
399
400 static void *function_stat_start(struct tracer_stat *trace)
401 {
402         struct ftrace_profile_stat *stat =
403                 container_of(trace, struct ftrace_profile_stat, stat);
404
405         if (!stat || !stat->start)
406                 return NULL;
407
408         return function_stat_next(&stat->start->records[0], 0);
409 }
410
411 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
412 /* function graph compares on total time */
413 static int function_stat_cmp(void *p1, void *p2)
414 {
415         struct ftrace_profile *a = p1;
416         struct ftrace_profile *b = p2;
417
418         if (a->time < b->time)
419                 return -1;
420         if (a->time > b->time)
421                 return 1;
422         else
423                 return 0;
424 }
425 #else
426 /* not function graph compares against hits */
427 static int function_stat_cmp(void *p1, void *p2)
428 {
429         struct ftrace_profile *a = p1;
430         struct ftrace_profile *b = p2;
431
432         if (a->counter < b->counter)
433                 return -1;
434         if (a->counter > b->counter)
435                 return 1;
436         else
437                 return 0;
438 }
439 #endif
440
/* Emit the column headers for a per-cpu function profile stat file */
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg             s^2\n"
		      "  --------                               "
		   "---    ----            ---             ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}
454
455 static int function_stat_show(struct seq_file *m, void *v)
456 {
457         struct ftrace_profile *rec = v;
458         char str[KSYM_SYMBOL_LEN];
459         int ret = 0;
460 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
461         static struct trace_seq s;
462         unsigned long long avg;
463         unsigned long long stddev;
464 #endif
465         mutex_lock(&ftrace_profile_lock);
466
467         /* we raced with function_profile_reset() */
468         if (unlikely(rec->counter == 0)) {
469                 ret = -EBUSY;
470                 goto out;
471         }
472
473         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
474         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
475
476 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
477         seq_printf(m, "    ");
478         avg = rec->time;
479         do_div(avg, rec->counter);
480
481         /* Sample standard deviation (s^2) */
482         if (rec->counter <= 1)
483                 stddev = 0;
484         else {
485                 stddev = rec->time_squared - rec->counter * avg * avg;
486                 /*
487                  * Divide only 1000 for ns^2 -> us^2 conversion.
488                  * trace_print_graph_duration will divide 1000 again.
489                  */
490                 do_div(stddev, (rec->counter - 1) * 1000);
491         }
492
493         trace_seq_init(&s);
494         trace_print_graph_duration(rec->time, &s);
495         trace_seq_puts(&s, "    ");
496         trace_print_graph_duration(avg, &s);
497         trace_seq_puts(&s, "    ");
498         trace_print_graph_duration(stddev, &s);
499         trace_print_seq(m, &s);
500 #endif
501         seq_putc(m, '\n');
502 out:
503         mutex_unlock(&ftrace_profile_lock);
504
505         return ret;
506 }
507
508 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
509 {
510         struct ftrace_profile_page *pg;
511
512         pg = stat->pages = stat->start;
513
514         while (pg) {
515                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
516                 pg->index = 0;
517                 pg = pg->next;
518         }
519
520         memset(stat->hash, 0,
521                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
522 }
523
524 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
525 {
526         struct ftrace_profile_page *pg;
527         int functions;
528         int pages;
529         int i;
530
531         /* If we already allocated, do nothing */
532         if (stat->pages)
533                 return 0;
534
535         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
536         if (!stat->pages)
537                 return -ENOMEM;
538
539 #ifdef CONFIG_DYNAMIC_FTRACE
540         functions = ftrace_update_tot_cnt;
541 #else
542         /*
543          * We do not know the number of functions that exist because
544          * dynamic tracing is what counts them. With past experience
545          * we have around 20K functions. That should be more than enough.
546          * It is highly unlikely we will execute every function in
547          * the kernel.
548          */
549         functions = 20000;
550 #endif
551
552         pg = stat->start = stat->pages;
553
554         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
555
556         for (i = 0; i < pages; i++) {
557                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
558                 if (!pg->next)
559                         goto out_free;
560                 pg = pg->next;
561         }
562
563         return 0;
564
565  out_free:
566         pg = stat->start;
567         while (pg) {
568                 unsigned long tmp = (unsigned long)pg;
569
570                 pg = pg->next;
571                 free_page(tmp);
572         }
573
574         free_page((unsigned long)stat->pages);
575         stat->pages = NULL;
576         stat->start = NULL;
577
578         return -ENOMEM;
579 }
580
/*
 * Set up (or reset) the profile hash and record pages for @cpu.
 * Returns 0 on success or -ENOMEM.
 */
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* compute log2 of the hash size once, for hash_long() */
	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}
621
622 static int ftrace_profile_init(void)
623 {
624         int cpu;
625         int ret = 0;
626
627         for_each_online_cpu(cpu) {
628                 ret = ftrace_profile_init_cpu(cpu);
629                 if (ret)
630                         break;
631         }
632
633         return ret;
634 }
635
/*
 * Look up the profile record for @ip in @stat's hash; returns NULL
 * if the function has not been recorded yet.
 * interrupts must be disabled by the caller.
 */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}
658
659 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
660                                struct ftrace_profile *rec)
661 {
662         unsigned long key;
663
664         key = hash_long(rec->ip, ftrace_profile_bits);
665         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
666 }
667
/*
 * The memory is already allocated, this simply finds a new record to use.
 * Returns the record for @ip (possibly one an NMI just created), or
 * NULL if the preallocated pages are exhausted or we recursed.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	/* current page full: advance to the next preallocated page */
	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
703
/*
 * Function trace callback for the profiler: bump the hit counter for
 * @ip on this cpu, allocating a record on first hit.
 */
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	/* re-check under irq-off in case we raced with a disable */
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
731
732 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Graph-entry hook: account one hit for the entered function */
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	/* nonzero presumably tells the graph tracer to keep tracing this
	 * call — confirm against the register_ftrace_graph() contract */
	return 1;
}
738
/*
 * Graph-return hook: add this call's duration (and its square, for
 * the stddev computation) to the function's profile record.
 */
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	/* without GRAPH_TIME, report self-time: subtract child runtimes */
	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}
781
/* Hook the profiler into the graph tracer (entry + return callbacks) */
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
792 #else
/* No graph tracer: profile via a plain function-trace callback */
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
806 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
807
/*
 * "function_profile_enabled" write handler: parse a 0/1 value from
 * userspace and start or stop the profiler accordingly.
 * Returns @cnt on success or a negative errno.
 */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	/* only act on an actual state change */
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
852
853 static ssize_t
854 ftrace_profile_read(struct file *filp, char __user *ubuf,
855                      size_t cnt, loff_t *ppos)
856 {
857         char buf[64];           /* big enough to hold a number */
858         int r;
859
860         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
861         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
862 }
863
/* debugfs file ops for "function_profile_enabled" */
static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};
870
/*
 * used to initialize the real stat files: copied into each cpu's
 * ftrace_profile_stat in ftrace_profile_debugfs(), then given a
 * per-cpu name.
 */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
880
/*
 * Create the per-cpu "function<N>" stat files and the
 * "function_profile_enabled" control file under @d_tracer.
 */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}
923
924 #else /* CONFIG_FUNCTION_PROFILER */
/* Profiler disabled: nothing to create in debugfs */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
928 #endif /* CONFIG_FUNCTION_PROFILER */
929
930 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
931
932 #ifdef CONFIG_DYNAMIC_FTRACE
933
934 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
935 # error Dynamic ftrace depends on MCOUNT_RECORD
936 #endif
937
938 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
939
/* A probe attached to a traced function address */
struct ftrace_func_probe {
	struct hlist_node	node;	/* presumably chained in ftrace_func_hash */
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;	/* address the probe is attached to */
	void			*data;
	struct rcu_head		rcu;	/* for deferred freeing */
};
948
/* command bits for the dynamic-ftrace code modification paths */
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};
/* One function address stored in an ftrace_hash bucket */
struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

/* Hash of function addresses (e.g. filter/notrace hashes) */
struct ftrace_hash {
	unsigned long		size_bits;	/* log2 of bucket count; 0 = one bucket */
	struct hlist_head	*buckets;
	unsigned long		count;		/* number of entries */
	struct rcu_head		rcu;
};
967
968 /*
969  * We make these constant because no one should touch them,
970  * but they are used as the default "empty hash", to avoid allocating
971  * it all the time. These are in a read only section such that if
972  * anyone does try to modify it, it will cause an exception.
973  */
974 static const struct hlist_head empty_buckets[1];
975 static const struct ftrace_hash empty_hash = {
976         .buckets = (struct hlist_head *)empty_buckets,
977 };
978 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
979
980 static struct ftrace_ops global_ops = {
981         .func                   = ftrace_stub,
982         .notrace_hash           = EMPTY_HASH,
983         .filter_hash            = EMPTY_HASH,
984 };
985
986 static struct dyn_ftrace *ftrace_new_addrs;
987
988 static DEFINE_MUTEX(ftrace_regex_lock);
989
990 struct ftrace_page {
991         struct ftrace_page      *next;
992         int                     index;
993         struct dyn_ftrace       records[];
994 };
995
996 #define ENTRIES_PER_PAGE \
997   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
998
999 /* estimate from running different kernels */
1000 #define NR_TO_INIT              10000
1001
1002 static struct ftrace_page       *ftrace_pages_start;
1003 static struct ftrace_page       *ftrace_pages;
1004
1005 static struct dyn_ftrace *ftrace_free_records;
1006
1007 static struct ftrace_func_entry *
1008 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1009 {
1010         unsigned long key;
1011         struct ftrace_func_entry *entry;
1012         struct hlist_head *hhd;
1013         struct hlist_node *n;
1014
1015         if (!hash->count)
1016                 return NULL;
1017
1018         if (hash->size_bits > 0)
1019                 key = hash_long(ip, hash->size_bits);
1020         else
1021                 key = 0;
1022
1023         hhd = &hash->buckets[key];
1024
1025         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
1026                 if (entry->ip == ip)
1027                         return entry;
1028         }
1029         return NULL;
1030 }
1031
1032 static void __add_hash_entry(struct ftrace_hash *hash,
1033                              struct ftrace_func_entry *entry)
1034 {
1035         struct hlist_head *hhd;
1036         unsigned long key;
1037
1038         if (hash->size_bits)
1039                 key = hash_long(entry->ip, hash->size_bits);
1040         else
1041                 key = 0;
1042
1043         hhd = &hash->buckets[key];
1044         hlist_add_head(&entry->hlist, hhd);
1045         hash->count++;
1046 }
1047
1048 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1049 {
1050         struct ftrace_func_entry *entry;
1051
1052         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1053         if (!entry)
1054                 return -ENOMEM;
1055
1056         entry->ip = ip;
1057         __add_hash_entry(hash, entry);
1058
1059         return 0;
1060 }
1061
1062 static void
1063 free_hash_entry(struct ftrace_hash *hash,
1064                   struct ftrace_func_entry *entry)
1065 {
1066         hlist_del(&entry->hlist);
1067         kfree(entry);
1068         hash->count--;
1069 }
1070
1071 static void
1072 remove_hash_entry(struct ftrace_hash *hash,
1073                   struct ftrace_func_entry *entry)
1074 {
1075         hlist_del(&entry->hlist);
1076         hash->count--;
1077 }
1078
1079 static void ftrace_hash_clear(struct ftrace_hash *hash)
1080 {
1081         struct hlist_head *hhd;
1082         struct hlist_node *tp, *tn;
1083         struct ftrace_func_entry *entry;
1084         int size = 1 << hash->size_bits;
1085         int i;
1086
1087         if (!hash->count)
1088                 return;
1089
1090         for (i = 0; i < size; i++) {
1091                 hhd = &hash->buckets[i];
1092                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1093                         free_hash_entry(hash, entry);
1094         }
1095         FTRACE_WARN_ON(hash->count);
1096 }
1097
1098 static void free_ftrace_hash(struct ftrace_hash *hash)
1099 {
1100         if (!hash || hash == EMPTY_HASH)
1101                 return;
1102         ftrace_hash_clear(hash);
1103         kfree(hash->buckets);
1104         kfree(hash);
1105 }
1106
1107 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1108 {
1109         struct ftrace_hash *hash;
1110
1111         hash = container_of(rcu, struct ftrace_hash, rcu);
1112         free_ftrace_hash(hash);
1113 }
1114
1115 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1116 {
1117         if (!hash || hash == EMPTY_HASH)
1118                 return;
1119         call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1120 }
1121
1122 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1123 {
1124         struct ftrace_hash *hash;
1125         int size;
1126
1127         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1128         if (!hash)
1129                 return NULL;
1130
1131         size = 1 << size_bits;
1132         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1133
1134         if (!hash->buckets) {
1135                 kfree(hash);
1136                 return NULL;
1137         }
1138
1139         hash->size_bits = size_bits;
1140
1141         return hash;
1142 }
1143
1144 static struct ftrace_hash *
1145 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1146 {
1147         struct ftrace_func_entry *entry;
1148         struct ftrace_hash *new_hash;
1149         struct hlist_node *tp;
1150         int size;
1151         int ret;
1152         int i;
1153
1154         new_hash = alloc_ftrace_hash(size_bits);
1155         if (!new_hash)
1156                 return NULL;
1157
1158         /* Empty hash? */
1159         if (!hash || !hash->count)
1160                 return new_hash;
1161
1162         size = 1 << hash->size_bits;
1163         for (i = 0; i < size; i++) {
1164                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1165                         ret = add_hash_entry(new_hash, entry->ip);
1166                         if (ret < 0)
1167                                 goto free_hash;
1168                 }
1169         }
1170
1171         FTRACE_WARN_ON(new_hash->count != hash->count);
1172
1173         return new_hash;
1174
1175  free_hash:
1176         free_ftrace_hash(new_hash);
1177         return NULL;
1178 }
1179
1180 static void
1181 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1182 static void
1183 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1184
1185 static int
1186 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1187                  struct ftrace_hash **dst, struct ftrace_hash *src)
1188 {
1189         struct ftrace_func_entry *entry;
1190         struct hlist_node *tp, *tn;
1191         struct hlist_head *hhd;
1192         struct ftrace_hash *old_hash;
1193         struct ftrace_hash *new_hash;
1194         unsigned long key;
1195         int size = src->count;
1196         int bits = 0;
1197         int ret;
1198         int i;
1199
1200         /*
1201          * Remove the current set, update the hash and add
1202          * them back.
1203          */
1204         ftrace_hash_rec_disable(ops, enable);
1205
1206         /*
1207          * If the new source is empty, just free dst and assign it
1208          * the empty_hash.
1209          */
1210         if (!src->count) {
1211                 free_ftrace_hash_rcu(*dst);
1212                 rcu_assign_pointer(*dst, EMPTY_HASH);
1213                 return 0;
1214         }
1215
1216         /*
1217          * Make the hash size about 1/2 the # found
1218          */
1219         for (size /= 2; size; size >>= 1)
1220                 bits++;
1221
1222         /* Don't allocate too much */
1223         if (bits > FTRACE_HASH_MAX_BITS)
1224                 bits = FTRACE_HASH_MAX_BITS;
1225
1226         ret = -ENOMEM;
1227         new_hash = alloc_ftrace_hash(bits);
1228         if (!new_hash)
1229                 goto out;
1230
1231         size = 1 << src->size_bits;
1232         for (i = 0; i < size; i++) {
1233                 hhd = &src->buckets[i];
1234                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1235                         if (bits > 0)
1236                                 key = hash_long(entry->ip, bits);
1237                         else
1238                                 key = 0;
1239                         remove_hash_entry(src, entry);
1240                         __add_hash_entry(new_hash, entry);
1241                 }
1242         }
1243
1244         old_hash = *dst;
1245         rcu_assign_pointer(*dst, new_hash);
1246         free_ftrace_hash_rcu(old_hash);
1247
1248         ret = 0;
1249  out:
1250         /*
1251          * Enable regardless of ret:
1252          *  On success, we enable the new hash.
1253          *  On failure, we re-enable the original hash.
1254          */
1255         ftrace_hash_rec_enable(ops, enable);
1256
1257         return ret;
1258 }
1259
1260 /*
1261  * Test the hashes for this ops to see if we want to call
1262  * the ops->func or not.
1263  *
1264  * It's a match if the ip is in the ops->filter_hash or
1265  * the filter_hash does not exist or is empty,
1266  *  AND
1267  * the ip is not in the ops->notrace_hash.
1268  *
1269  * This needs to be called with preemption disabled as
1270  * the hashes are freed with call_rcu_sched().
1271  */
1272 static int
1273 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
1274 {
1275         struct ftrace_hash *filter_hash;
1276         struct ftrace_hash *notrace_hash;
1277         int ret;
1278
1279         filter_hash = rcu_dereference_raw(ops->filter_hash);
1280         notrace_hash = rcu_dereference_raw(ops->notrace_hash);
1281
1282         if ((!filter_hash || !filter_hash->count ||
1283              ftrace_lookup_ip(filter_hash, ip)) &&
1284             (!notrace_hash || !notrace_hash->count ||
1285              !ftrace_lookup_ip(notrace_hash, ip)))
1286                 ret = 1;
1287         else
1288                 ret = 0;
1289
1290         return ret;
1291 }
1292
1293 /*
1294  * This is a double for. Do not use 'break' to break out of the loop,
1295  * you must use a goto.
1296  */
1297 #define do_for_each_ftrace_rec(pg, rec)                                 \
1298         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1299                 int _____i;                                             \
1300                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1301                         rec = &pg->records[_____i];
1302
1303 #define while_for_each_ftrace_rec()             \
1304                 }                               \
1305         }
1306
/*
 * Adjust the per-record ref-counts (low bits of rec->flags) for the
 * functions selected by one of @ops's hashes.  @filter_hash picks the
 * filter hash (non-zero) or the notrace hash (zero); @inc increments
 * when true and decrements when false.
 */
static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inversed.
	 */
	if (filter_hash) {
		hash = ops->filter_hash;
		other_hash = ops->notrace_hash;
		if (!hash || !hash->count)
			all = 1;
	} else {
		inc = !inc;
		hash = ops->notrace_hash;
		other_hash = ops->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (hash && !hash->count)
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * For the filter hash: update records that are in
			 * this hash but not also in the notrace hash.
			 *
			 * For the notrace hash: update records that are in
			 * this hash and either in the filter hash too, or
			 * the filter hash is empty (all functions traced).
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || !other_hash->count))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			/* ref-count saturation would corrupt the flag bits */
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
				return;
		} else {
			/* never decrement a record that holds no refs */
			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
				return;
			rec->flags--;
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}
1393
1394 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1395                                     int filter_hash)
1396 {
1397         __ftrace_hash_rec_update(ops, filter_hash, 0);
1398 }
1399
1400 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1401                                    int filter_hash)
1402 {
1403         __ftrace_hash_rec_update(ops, filter_hash, 1);
1404 }
1405
/*
 * Push @rec onto the local free list and tag it FTRACE_FL_FREE so
 * ftrace_alloc_dyn_node() can sanity-check popped entries.
 */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
1412
/*
 * Get a zeroed dyn_ftrace record: reuse one from the free list when
 * possible, otherwise take the next slot from ftrace_pages, chaining
 * a new page when the current one is full.  Returns NULL on failure.
 * (@ip is currently unused here — the caller fills rec->ip itself.)
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* Free-list entries must carry FTRACE_FL_FREE (ftrace_free_rec) */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			/* list is corrupt — abandon it rather than walk it */
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
1445
1446 static struct dyn_ftrace *
1447 ftrace_record_ip(unsigned long ip)
1448 {
1449         struct dyn_ftrace *rec;
1450
1451         if (ftrace_disabled)
1452                 return NULL;
1453
1454         rec = ftrace_alloc_dyn_node(ip);
1455         if (!rec)
1456                 return NULL;
1457
1458         rec->ip = ip;
1459         rec->newlist = ftrace_new_addrs;
1460         ftrace_new_addrs = rec;
1461
1462         return rec;
1463 }
1464
/*
 * Dump the MCOUNT_INSN_SIZE instruction bytes at @p as colon-separated
 * hex, prefixed by @fmt, continuing the current printk line.
 */
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
1474
/*
 * Report a code-patching failure.  @failed is the errno-style code
 * returned by an arch routine (ftrace_make_nop()/ftrace_make_call());
 * @ip is the address that was being modified.
 */
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		/* show the unexpected bytes found at the call site */
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
1501
1502
1503 /* Return 1 if the address range is reserved for ftrace */
1504 int ftrace_text_reserved(void *start, void *end)
1505 {
1506         struct dyn_ftrace *rec;
1507         struct ftrace_page *pg;
1508
1509         do_for_each_ftrace_rec(pg, rec) {
1510                 if (rec->ip <= (unsigned long)end &&
1511                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1512                         return 1;
1513         } while_for_each_ftrace_rec();
1514         return 0;
1515 }
1516
1517
1518 static int
1519 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1520 {
1521         unsigned long ftrace_addr;
1522         unsigned long flag = 0UL;
1523
1524         ftrace_addr = (unsigned long)FTRACE_ADDR;
1525
1526         /*
1527          * If we are enabling tracing:
1528          *
1529          *   If the record has a ref count, then we need to enable it
1530          *   because someone is using it.
1531          *
1532          *   Otherwise we make sure its disabled.
1533          *
1534          * If we are disabling tracing, then disable all records that
1535          * are enabled.
1536          */
1537         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1538                 flag = FTRACE_FL_ENABLED;
1539
1540         /* If the state of this record hasn't changed, then do nothing */
1541         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1542                 return 0;
1543
1544         if (flag) {
1545                 rec->flags |= FTRACE_FL_ENABLED;
1546                 return ftrace_make_call(rec, ftrace_addr);
1547         }
1548
1549         rec->flags &= ~FTRACE_FL_ENABLED;
1550         return ftrace_make_nop(NULL, rec, ftrace_addr);
1551 }
1552
1553 static void ftrace_replace_code(int enable)
1554 {
1555         struct dyn_ftrace *rec;
1556         struct ftrace_page *pg;
1557         int failed;
1558
1559         if (unlikely(ftrace_disabled))
1560                 return;
1561
1562         do_for_each_ftrace_rec(pg, rec) {
1563                 /* Skip over free records */
1564                 if (rec->flags & FTRACE_FL_FREE)
1565                         continue;
1566
1567                 failed = __ftrace_replace_code(rec, enable);
1568                 if (failed) {
1569                         ftrace_bug(failed, rec->ip);
1570                         /* Stop processing */
1571                         return;
1572                 }
1573         } while_for_each_ftrace_rec();
1574 }
1575
1576 static int
1577 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1578 {
1579         unsigned long ip;
1580         int ret;
1581
1582         ip = rec->ip;
1583
1584         if (unlikely(ftrace_disabled))
1585                 return 0;
1586
1587         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1588         if (ret) {
1589                 ftrace_bug(ret, ip);
1590                 return 0;
1591         }
1592         return 1;
1593 }
1594
1595 /*
1596  * archs can override this function if they must do something
1597  * before the modifying code is performed.
1598  */
int __weak ftrace_arch_code_modify_prepare(void)
{
	/* default: nothing to prepare; archs override this weak symbol */
	return 0;
}
1603
1604 /*
1605  * archs can override this function if they must do something
1606  * after the modifying code is performed.
1607  */
int __weak ftrace_arch_code_modify_post_process(void)
{
	/* default: no post-processing; archs override this weak symbol */
	return 0;
}
1612
/*
 * Executed inside stop_machine(): apply the FTRACE_* command bits in
 * *@data by patching kernel text.  Runs with all other CPUs stopped,
 * so the text can be modified without racing executors.
 */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	/*
	 * Do not call function tracer while we update the code.
	 * We are in stop machine, no worrying about races.
	 */
	function_trace_stop++;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/*
	 * For archs that call ftrace_test_stop_func(), we must
	 * wait till after we update all the function callers
	 * before we update the callback. This keeps different
	 * ops that record different functions from corrupting
	 * each other.
	 */
	__ftrace_trace_function = __ftrace_trace_function_delay;
#endif
	function_trace_stop--;

	return 0;
}
1650
1651 static void ftrace_run_update_code(int command)
1652 {
1653         int ret;
1654
1655         ret = ftrace_arch_code_modify_prepare();
1656         FTRACE_WARN_ON(ret);
1657         if (ret)
1658                 return;
1659
1660         stop_machine(__ftrace_modify_code, &command, NULL);
1661
1662         ret = ftrace_arch_code_modify_post_process();
1663         FTRACE_WARN_ON(ret);
1664 }
1665
1666 static ftrace_func_t saved_ftrace_func;
1667 static int ftrace_start_up;
1668 static int global_start_up;
1669
1670 static void ftrace_startup_enable(int command)
1671 {
1672         if (saved_ftrace_func != ftrace_trace_function) {
1673                 saved_ftrace_func = ftrace_trace_function;
1674                 command |= FTRACE_UPDATE_TRACE_FUNC;
1675         }
1676
1677         if (!command || !ftrace_enabled)
1678                 return;
1679
1680         ftrace_run_update_code(command);
1681 }
1682
/*
 * Activate @ops: bump the record ref counts for the functions it
 * selects and patch the kernel with @command | FTRACE_ENABLE_CALLS.
 * Returns 0, or -ENODEV if ftrace has been disabled.
 */
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
	bool hash_enable = true;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	/* ops marked global share the filter hashes */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		/* Don't update hash if global is already set */
		if (global_start_up)
			hash_enable = false;
		global_start_up++;
	}

	/*
	 * Set ENABLED before updating the records:
	 * __ftrace_hash_rec_update() ignores ops that are not enabled.
	 */
	ops->flags |= FTRACE_OPS_FL_ENABLED;
	if (hash_enable)
		ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

	return 0;
}
1710
/*
 * Deactivate @ops: drop its record ref counts and patch the kernel,
 * adding FTRACE_DISABLE_CALLS once no users remain.
 */
static void ftrace_shutdown(struct ftrace_ops *ops, int command)
{
	bool hash_disable = true;

	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	/* global ops share hashes; only the last user tears them down */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
		ops = &global_ops;
		global_start_up--;
		WARN_ON_ONCE(global_start_up < 0);
		/* Don't update hash if global still has users */
		if (global_start_up) {
			WARN_ON_ONCE(!ftrace_start_up);
			hash_disable = false;
		}
	}

	if (hash_disable)
		ftrace_hash_rec_disable(ops, 1);

	/* keep global_ops marked enabled while it still has users */
	if (ops != &global_ops || !global_start_up)
		ops->flags &= ~FTRACE_OPS_FL_ENABLED;

	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	/* pick up a changed trace callback, as in ftrace_startup_enable() */
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
1756
1757 static void ftrace_startup_sysctl(void)
1758 {
1759         if (unlikely(ftrace_disabled))
1760                 return;
1761
1762         /* Force update next time */
1763         saved_ftrace_func = NULL;
1764         /* ftrace_start_up is true if we want ftrace running */
1765         if (ftrace_start_up)
1766                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1767 }
1768
1769 static void ftrace_shutdown_sysctl(void)
1770 {
1771         if (unlikely(ftrace_disabled))
1772                 return;
1773
1774         /* ftrace_start_up is true if ftrace is running */
1775         if (ftrace_start_up)
1776                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1777 }
1778
1779 static cycle_t          ftrace_update_time;
1780 static unsigned long    ftrace_update_cnt;
1781 unsigned long           ftrace_update_tot_cnt;
1782
1783 static int ops_traces_mod(struct ftrace_ops *ops)
1784 {
1785         struct ftrace_hash *hash;
1786
1787         hash = ops->filter_hash;
1788         return !!(!hash || !hash->count);
1789 }
1790
/*
 * Convert all records queued on ftrace_new_addrs to NOPs and, when
 * tracers that trace everything are active, enable them immediately.
 * @mod is the module owning the records, or NULL for core kernel.
 * Returns 0 on success, -1 if ftrace got disabled mid-way.
 */
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long ref = 0;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED &&
			    ops_traces_mod(ops))
				ref++;
		}
	}

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		/* seed the ref count with the trace-all ops counted above */
		p->flags = ref;

		/*
		 * Do the initial record conversion from mcount jump
		 * to the NOP instructions.
		 */
		if (!ftrace_code_disable(mod, p)) {
			ftrace_free_rec(p);
			/* Game over */
			break;
		}

		ftrace_update_cnt++;

		/*
		 * If the tracing is enabled, go ahead and enable the record.
		 *
		 * The reason not to enable the record immediatelly is the
		 * inherent check of ftrace_make_nop/ftrace_make_call for
		 * correct previous instructions.  Making first the NOP
		 * conversion puts the module to the correct state, thus
		 * passing the ftrace_make_call check.
		 */
		if (ftrace_start_up && ref) {
			int failed = __ftrace_replace_code(p, 1);
			if (failed) {
				ftrace_bug(failed, p->ip);
				ftrace_free_rec(p);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
1863
1864 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1865 {
1866         struct ftrace_page *pg;
1867         int cnt;
1868         int i;
1869
1870         /* allocate a few pages */
1871         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1872         if (!ftrace_pages_start)
1873                 return -1;
1874
1875         /*
1876          * Allocate a few more pages.
1877          *
1878          * TODO: have some parser search vmlinux before
1879          *   final linking to find all calls to ftrace.
1880          *   Then we can:
1881          *    a) know how many pages to allocate.
1882          *     and/or
1883          *    b) set up the table then.
1884          *
1885          *  The dynamic code is still necessary for
1886          *  modules.
1887          */
1888
1889         pg = ftrace_pages = ftrace_pages_start;
1890
1891         cnt = num_to_init / ENTRIES_PER_PAGE;
1892         pr_info("ftrace: allocating %ld entries in %d pages\n",
1893                 num_to_init, cnt + 1);
1894
1895         for (i = 0; i < cnt; i++) {
1896                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1897
1898                 /* If we fail, we'll try later anyway */
1899                 if (!pg->next)
1900                         break;
1901
1902                 pg = pg->next;
1903         }
1904
1905         return 0;
1906 }
1907
/* Bits for ftrace_iterator.flags, selecting what the seq_file shows. */
enum {
	FTRACE_ITER_FILTER	= (1 << 0),	/* presumably: walking the filter hash — confirm at users below */
	FTRACE_ITER_NOTRACE	= (1 << 1),	/* presumably: walking the notrace hash — confirm at users below */
	FTRACE_ITER_PRINTALL	= (1 << 2),	/* presumably: print-all mode — confirm at users below */
	FTRACE_ITER_HASH	= (1 << 3),	/* iterating ftrace_func_hash probes (set by t_hash_start) */
	FTRACE_ITER_ENABLED	= (1 << 4),	/* presumably: only enabled records — confirm at users below */
};
1915
1916 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1917
/* Per-open state for the debugfs function-list seq_file iterators. */
struct ftrace_iterator {
	loff_t				pos;		/* current seq position */
	loff_t				func_pos;	/* position where function records end and probes begin */
	struct ftrace_page		*pg;		/* record page being walked */
	struct dyn_ftrace		*func;		/* current function record */
	struct ftrace_func_probe	*probe;		/* current probe during hash iteration */
	struct trace_parser		parser;		/* write-side input parser — used by writers; confirm */
	struct ftrace_hash		*hash;		/* hash being edited — used by writers; confirm */
	struct ftrace_ops		*ops;		/* ops owning the hash — used by writers; confirm */
	int				hidx;		/* index into ftrace_func_hash buckets */
	int				idx;		/* record index within *pg */
	unsigned			flags;		/* FTRACE_ITER_* bits */
};
1931
/*
 * Advance the iterator to the next entry of the function-probe hash
 * (the section listed after all function records).  Returns the
 * iterator, or NULL once every hash bucket is exhausted.
 */
static void *
t_hash_next(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = NULL;
	struct hlist_head *hhd;

	(*pos)++;
	iter->pos = *pos;

	/* Resume after the probe handed out last time, if any */
	if (iter->probe)
		hnd = &iter->probe->node;
 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			/* end of this bucket; move on to the next one */
			iter->hidx++;
			goto retry;
		}
	}

	if (WARN_ON_ONCE(!hnd))
		return NULL;

	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);

	return iter;
}
1973
/*
 * Position the iterator at probe-hash offset (*pos - iter->func_pos);
 * the hash entries are numbered after all function records.  Sets
 * FTRACE_ITER_HASH only when an entry was actually found.
 */
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	/* Still inside the function-record range: not our turn yet */
	if (iter->func_pos > *pos)
		return NULL;

	iter->hidx = 0;
	for (l = 0; l <= (*pos - iter->func_pos); ) {
		p = t_hash_next(m, &l);
		if (!p)
			break;
	}
	if (!p)
		return NULL;

	/* Only set this if we have an item */
	iter->flags |= FTRACE_ITER_HASH;

	return iter;
}
1997
/*
 * Print one probe entry as "function:handler[:data]", unless the
 * probe supplies its own print method.
 */
static int
t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
{
	struct ftrace_func_probe *rec;

	rec = iter->probe;
	if (WARN_ON_ONCE(!rec))
		return -EIO;

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}
2018
/*
 * Advance to the next dyn_ftrace record that passes the iterator's
 * mode filters; when the records run out, continue into the
 * function-probe hash via t_hash_start().
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = &global_ops;
	struct dyn_ftrace *rec = NULL;

	if (unlikely(ftrace_disabled))
		return NULL;

	/* Already past the records: keep walking the probe hash */
	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, pos);

	(*pos)++;
	iter->pos = iter->func_pos = *pos;

	/* "all functions enabled" is a single line; go straight to probes */
	if (iter->flags & FTRACE_ITER_PRINTALL)
		return t_hash_start(m, pos);

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/* Skip freed records and those filtered out by the mode bits */
		if ((rec->flags & FTRACE_FL_FREE) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||

		    ((iter->flags & FTRACE_ITER_ENABLED) &&
		     !(rec->flags & ~FTRACE_FL_MASK))) {

			rec = NULL;
			goto retry;
		}
	}

	if (!rec)
		return t_hash_start(m, pos);

	iter->func = rec;

	return iter;
}
2070
2071 static void reset_iter_read(struct ftrace_iterator *iter)
2072 {
2073         iter->pos = 0;
2074         iter->func_pos = 0;
2075         iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
2076 }
2077
/*
 * seq_file start: takes ftrace_lock, which is released in t_stop().
 * (Returning NULL here with the lock still held is safe only because
 * the seq_file core always pairs ->start with ->stop.)  Record page
 * pointers can change while the lock is dropped between reads, so the
 * walk is redone from ftrace_pages_start up to *pos every time.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct ftrace_ops *ops = &global_ops;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		return NULL;

	/*
	 * If an lseek was done, then reset and start from beginning.
	 */
	if (*pos < iter->pos)
		reset_iter_read(iter);

	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		/* reset in case of seek/pread */
		iter->flags &= ~FTRACE_ITER_HASH;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_mutex. This is because
	 * those pointers can change without the lock.
	 */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p) {
		/* Records exhausted; the probe hash may still have entries */
		if (iter->flags & FTRACE_ITER_FILTER)
			return t_hash_start(m, pos);

		return NULL;
	}

	return iter;
}
2135
/* seq_file stop: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
2140
/*
 * Emit one line of output: a probe-hash entry, the "all functions"
 * banner, or a function name (for enabled_functions, also the
 * record's flag bits outside FTRACE_FL_MASK).
 */
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, iter);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	rec = iter->func;

	if (!rec)
		return 0;

	seq_printf(m, "%ps", (void *)rec->ip);
	if (iter->flags & FTRACE_ITER_ENABLED)
		seq_printf(m, " (%ld)",
			   rec->flags & ~FTRACE_FL_MASK);
	seq_printf(m, "\n");

	return 0;
}
2167
/* seq_file operations shared by the available/enabled/filter files. */
static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
2174
2175 static int
2176 ftrace_avail_open(struct inode *inode, struct file *file)
2177 {
2178         struct ftrace_iterator *iter;
2179         int ret;
2180
2181         if (unlikely(ftrace_disabled))
2182                 return -ENODEV;
2183
2184         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2185         if (!iter)
2186                 return -ENOMEM;
2187
2188         iter->pg = ftrace_pages_start;
2189
2190         ret = seq_open(file, &show_ftrace_seq_ops);
2191         if (!ret) {
2192                 struct seq_file *m = file->private_data;
2193
2194                 m->private = iter;
2195         } else {
2196                 kfree(iter);
2197         }
2198
2199         return ret;
2200 }
2201
2202 static int
2203 ftrace_enabled_open(struct inode *inode, struct file *file)
2204 {
2205         struct ftrace_iterator *iter;
2206         int ret;
2207
2208         if (unlikely(ftrace_disabled))
2209                 return -ENODEV;
2210
2211         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2212         if (!iter)
2213                 return -ENOMEM;
2214
2215         iter->pg = ftrace_pages_start;
2216         iter->flags = FTRACE_ITER_ENABLED;
2217
2218         ret = seq_open(file, &show_ftrace_seq_ops);
2219         if (!ret) {
2220                 struct seq_file *m = file->private_data;
2221
2222                 m->private = iter;
2223         } else {
2224                 kfree(iter);
2225         }
2226
2227         return ret;
2228 }
2229
/* Empty @hash under ftrace_lock. */
static void ftrace_filter_reset(struct ftrace_hash *hash)
{
	mutex_lock(&ftrace_lock);
	ftrace_hash_clear(hash);
	mutex_unlock(&ftrace_lock);
}
2236
/*
 * Common open for set_ftrace_filter (FTRACE_ITER_FILTER) and
 * set_ftrace_notrace (FTRACE_ITER_NOTRACE).  Writers get a private
 * working copy of the selected hash in iter->hash; readers get the
 * shared seq_file iterator.  Returns 0 or a negative errno.
 */
static int
ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	struct ftrace_hash *hash;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	/* Pick which hash this file edits/lists */
	if (flag & FTRACE_ITER_NOTRACE)
		hash = ops->notrace_hash;
	else
		hash = ops->filter_hash;

	iter->ops = ops;
	iter->flags = flag;

	if (file->f_mode & FMODE_WRITE) {
		mutex_lock(&ftrace_lock);
		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
		mutex_unlock(&ftrace_lock);

		if (!iter->hash) {
			trace_parser_put(&iter->parser);
			kfree(iter);
			return -ENOMEM;
		}
	}

	mutex_lock(&ftrace_regex_lock);

	/* O_TRUNC empties the working copy before any writes land */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(iter->hash);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* Failed */
			free_ftrace_hash(iter->hash);
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
2302
/* Open set_ftrace_filter: edits the global ops filter hash. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
2309
/* Open set_ftrace_notrace: edits the global ops notrace hash. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
				 inode, file);
}
2316
/*
 * lseek for the filter files: delegate to seq_lseek() when the file
 * was opened readable; write-only opens just set f_pos to 1.
 */
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
2329
2330 static int ftrace_match(char *str, char *regex, int len, int type)
2331 {
2332         int matched = 0;
2333         int slen;
2334
2335         switch (type) {
2336         case MATCH_FULL:
2337                 if (strcmp(str, regex) == 0)
2338                         matched = 1;
2339                 break;
2340         case MATCH_FRONT_ONLY:
2341                 if (strncmp(str, regex, len) == 0)
2342                         matched = 1;
2343                 break;
2344         case MATCH_MIDDLE_ONLY:
2345                 if (strstr(str, regex))
2346                         matched = 1;
2347                 break;
2348         case MATCH_END_ONLY:
2349                 slen = strlen(str);
2350                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2351                         matched = 1;
2352                 break;
2353         }
2354
2355         return matched;
2356 }
2357
2358 static int
2359 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2360 {
2361         struct ftrace_func_entry *entry;
2362         int ret = 0;
2363
2364         entry = ftrace_lookup_ip(hash, rec->ip);
2365         if (not) {
2366                 /* Do nothing if it doesn't exist */
2367                 if (!entry)
2368                         return 0;
2369
2370                 free_hash_entry(hash, entry);
2371         } else {
2372                 /* Do nothing if it exists */
2373                 if (entry)
2374                         return 0;
2375
2376                 ret = add_hash_entry(hash, rec->ip);
2377         }
2378         return ret;
2379 }
2380
/*
 * Resolve @rec's ip to a symbol and match it against @regex.  When
 * @mod is set the symbol must belong to that module, and a
 * zero-length regex then matches every function in the module.
 */
static int
ftrace_match_record(struct dyn_ftrace *rec, char *mod,
		    char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (mod) {
		/* module lookup requires matching the module */
		if (!modname || strcmp(modname, mod))
			return 0;

		/* blank search means to match all funcs in the mod */
		if (!len)
			return 1;
	}

	return ftrace_match(str, regex, len, type);
}
2402
/*
 * Walk every ftrace record and enter those matching @buff into @hash
 * (removing instead when @not).  A non-zero @len makes @buff be
 * parsed as a glob, which may itself flip @not via a leading '!'.
 * Returns 1 if anything matched, 0 if nothing did, or a negative
 * errno from enter_record().
 */
static int
match_records(struct ftrace_hash *hash, char *buff,
	      int len, char *mod, int not)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	int found = 0;
	int ret;

	if (len) {
		type = filter_parse_regex(buff, len, &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {

		if (ftrace_match_record(rec, mod, search, search_len, type)) {
			ret = enter_record(hash, rec, not);
			if (ret < 0) {
				found = ret;
				goto out_unlock;
			}
			found = 1;
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);

	return found;
}
2441
/* Enter all records matching @buff into @hash, with no module restriction. */
static int
ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
{
	return match_records(hash, buff, len, NULL, 0);
}
2447
/*
 * Enter every function of module @mod matching @buff into @hash.
 * Blank and "*" both mean every function in the module; a leading
 * "!" ("!" or "!*") removes instead of adds.
 */
static int
ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
{
	int not = 0;

	if (!strcmp(buff, "!") || !strcmp(buff, "!*")) {
		/* 'dont filter this module' */
		buff[0] = 0;
		not = 1;
	} else if (!strcmp(buff, "*")) {
		/* blank and '*' mean the same */
		buff[0] = 0;
	}

	return match_records(hash, buff, strlen(buff), mod, not);
}
2465
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */
2470
2471 static int
2472 ftrace_mod_callback(struct ftrace_hash *hash,
2473                     char *func, char *cmd, char *param, int enable)
2474 {
2475         char *mod;
2476         int ret = -EINVAL;
2477
2478         /*
2479          * cmd == 'mod' because we only registered this func
2480          * for the 'mod' ftrace_func_command.
2481          * But if you register one func with multiple commands,
2482          * you can tell which command was used by the cmd
2483          * parameter.
2484          */
2485
2486         /* we must have a module name */
2487         if (!param)
2488                 return ret;
2489
2490         mod = strsep(&param, ":");
2491         if (!strlen(mod))
2492                 return ret;
2493
2494         ret = ftrace_match_module_records(hash, func, mod);
2495         if (!ret)
2496                 ret = -EINVAL;
2497         if (ret < 0)
2498                 return ret;
2499
2500         return 0;
2501 }
2502
/* The "mod" command, wired to ftrace_mod_callback() above. */
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

/* Register the "mod" command at boot. */
static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
2513
/*
 * ftrace handler of trace_probe_ops: look @ip up in the probe hash
 * and invoke the handler of every probe registered for it.
 */
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	preempt_disable_notrace();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	preempt_enable_notrace();
}
2541
/* The ftrace_ops through which all function probes fire. */
static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

/* Non-zero once trace_probe_ops has been hooked into ftrace. */
static int ftrace_probe_registered;
2548
/*
 * Register and start trace_probe_ops once at least one probe exists
 * in ftrace_func_hash.  Called with ftrace_lock held (see
 * register_ftrace_function_probe()).
 */
static void __enable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	ret = __register_ftrace_function(&trace_probe_ops);
	if (!ret)
		ret = ftrace_startup(&trace_probe_ops, 0);

	/*
	 * NOTE(review): ret is ignored here — the flag is set even if
	 * registration/startup failed, so a failure is never retried.
	 * Confirm whether that is intended.
	 */
	ftrace_probe_registered = 1;
}
2572
/*
 * Unregister trace_probe_ops once the probe hash is completely empty.
 * Bails out early while any bucket still has an entry.
 */
static void __disable_ftrace_function_probe(void)
{
	int ret;
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	ret = __unregister_ftrace_function(&trace_probe_ops);
	if (!ret)
		ftrace_shutdown(&trace_probe_ops, 0);

	ftrace_probe_registered = 0;
}
2594
2595
2596 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2597 {
2598         struct ftrace_func_probe *entry =
2599                 container_of(rhp, struct ftrace_func_probe, rcu);
2600
2601         if (entry->ops->free)
2602                 entry->ops->free(&entry->data);
2603         kfree(entry);
2604 }
2605
2606
/*
 * register_ftrace_function_probe - attach @ops to all functions
 * matching @glob.  '!' negation is not supported.  @data is copied
 * into every entry; ops->callback(), when present, may veto
 * individual functions.  Returns the number of entries counted or a
 * negative errno when nothing could be allocated.
 */
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {

		if (!ftrace_match_record(rec, NULL, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		/*
		 * NOTE(review): count is bumped before the callback below
		 * can veto this function, so vetoed functions are still
		 * counted in the return value.
		 */
		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
2675
/* Matching criteria for __unregister_ftrace_function_probe(). */
enum {
	PROBE_TEST_FUNC		= 1,	/* entry must use the given ops */
	PROBE_TEST_DATA		= 2	/* entry must carry the given data */
};
2680
/*
 * Remove function probes.  @flags (PROBE_TEST_*) selects whether
 * entries must match @ops and/or @data; a NULL, empty or "*" @glob
 * matches every function name.  Entry freeing is deferred via RCU to
 * synchronize with function_trace_probe_call().
 */
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}
2733
/* Remove probes matching @glob that use this @ops AND this @data. */
void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}
2741
/* Remove probes matching @glob that use this @ops, regardless of data. */
void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}
2747
/* Remove every probe matching @glob, regardless of ops or data. */
void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
2752
/* Registered "func:cmd:param" filter commands, under ftrace_cmd_mutex. */
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);
2755
2756 int register_ftrace_command(struct ftrace_func_command *cmd)
2757 {
2758         struct ftrace_func_command *p;
2759         int ret = 0;
2760
2761         mutex_lock(&ftrace_cmd_mutex);
2762         list_for_each_entry(p, &ftrace_commands, list) {
2763                 if (strcmp(cmd->name, p->name) == 0) {
2764                         ret = -EBUSY;
2765                         goto out_unlock;
2766                 }
2767         }
2768         list_add(&cmd->list, &ftrace_commands);
2769  out_unlock:
2770         mutex_unlock(&ftrace_cmd_mutex);
2771
2772         return ret;
2773 }
2774
2775 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2776 {
2777         struct ftrace_func_command *p, *n;
2778         int ret = -ENODEV;
2779
2780         mutex_lock(&ftrace_cmd_mutex);
2781         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2782                 if (strcmp(cmd->name, p->name) == 0) {
2783                         ret = 0;
2784                         list_del_init(&p->list);
2785                         goto out_unlock;
2786                 }
2787         }
2788  out_unlock:
2789         mutex_unlock(&ftrace_cmd_mutex);
2790
2791         return ret;
2792 }
2793
/*
 * Apply one completed input token: either a plain filter glob or a
 * "func:command[:param]" invocation of a registered command.
 * Returns 0 on success or a negative errno (-EINVAL when nothing
 * matched or the command is unknown).
 */
static int ftrace_process_regex(struct ftrace_hash *hash,
				char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	/* No ':' — the whole token is a filter expression */
	if (!next) {
		ret = ftrace_match_records(hash, func, len);
		if (!ret)
			ret = -EINVAL;
		if (ret < 0)
			return ret;
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(hash, func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
2828
/*
 * Common write handler for the filter files: accumulate user input in
 * the iterator's parser and apply each fully-read token through
 * ftrace_process_regex().  Returns bytes consumed or a negative
 * errno.
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	ret = -ENODEV;
	if (unlikely(ftrace_disabled))
		goto out_unlock;

	/* The iterator lives in different spots for RW vs write-only opens */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	/* Apply only once a complete (non-continued) token is loaded */
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(iter->hash, parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret)
			goto out_unlock;
	}

	ret = read;
out_unlock:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
2870
/* Write handler for set_ftrace_filter (enable == 1). */
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}
2877
/* Write handler for set_ftrace_notrace (enable == 0). */
static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
2884
/*
 * Core of ftrace_set_filter()/ftrace_set_notrace(): build a working
 * copy of the selected hash, optionally reset it, fold in @buf, then
 * swap it in under ftrace_lock and re-run the code update when the
 * ops is currently enabled.  Returns 0 or a negative errno.
 */
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
		 int reset, int enable)
{
	struct ftrace_hash **orig_hash;
	struct ftrace_hash *hash;
	int ret;

	/* All global ops uses the global ops filters */
	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
		ops = &global_ops;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	/* enable selects the filter hash, otherwise the notrace hash */
	if (enable)
		orig_hash = &ops->filter_hash;
	else
		orig_hash = &ops->notrace_hash;

	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
	if (!hash)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(hash);
	if (buf)
		ftrace_match_records(hash, buf, len);

	mutex_lock(&ftrace_lock);
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
	if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
	    && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);

	mutex_unlock(&ftrace_lock);

	mutex_unlock(&ftrace_regex_lock);

	free_ftrace_hash(hash);
	return ret;
}
2928
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops: the ops to set the filter with
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
2945
2946 /**
2947  * ftrace_set_notrace - set a function to not trace in ftrace
2948  * @ops - the ops to set the notrace filter with
2949  * @buf - the string that holds the function notrace text.
2950  * @len - the length of the string.
2951  * @reset - non zero to reset all filters before applying this filter.
2952  *
2953  * Notrace Filters denote which functions should not be enabled when tracing
2954  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2955  * for tracing.
2956  */
2957 void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
2958                         int len, int reset)
2959 {
2960         ftrace_set_regex(ops, buf, len, reset, 0);
2961 }
2962 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);

/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
2995
2996 /*
2997  * command line interface to allow users to set filters on boot up.
2998  */
2999 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
3000 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3001 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3002
3003 static int __init set_ftrace_notrace(char *str)
3004 {
3005         strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3006         return 1;
3007 }
3008 __setup("ftrace_notrace=", set_ftrace_notrace);
3009
3010 static int __init set_ftrace_filter(char *str)
3011 {
3012         strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3013         return 1;
3014 }
3015 __setup("ftrace_filter=", set_ftrace_filter);
3016
3017 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3018 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3019 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3020
/*
 * Boot-time "ftrace_graph_filter=" parameter: saved here and applied by
 * set_ftrace_early_graph() during ftrace_init().
 */
static int __init set_graph_function(char *str)
{
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
3027
3028 static void __init set_ftrace_early_graph(char *buf)
3029 {
3030         int ret;
3031         char *func;
3032
3033         while (buf) {
3034                 func = strsep(&buf, ",");
3035                 /* we allow only one expression at a time */
3036                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3037                                       func);
3038                 if (ret)
3039                         printk(KERN_DEBUG "ftrace: function %s not "
3040                                           "traceable\n", func);
3041         }
3042 }
3043 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3044
3045 static void __init
3046 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3047 {
3048         char *func;
3049
3050         while (buf) {
3051                 func = strsep(&buf, ",");
3052                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
3053         }
3054 }
3055
/* Called from ftrace_init() to apply any boot-time filter parameters. */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
3067
/*
 * Release for the filter/notrace files: flush any pending (unterminated)
 * parser input into the working hash, then commit that hash into the
 * ops' live hash and re-enable call sites if needed.
 */
static int
ftrace_regex_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;

	mutex_lock(&ftrace_regex_lock);
	/* Readers stash the iterator in the seq_file private data. */
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		/* Treat leftover bytes as one final, newline-less command. */
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
	}

	trace_parser_put(parser);

	if (file->f_mode & FMODE_WRITE) {
		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);

		if (filter_hash)
			orig_hash = &iter->ops->filter_hash;
		else
			orig_hash = &iter->ops->notrace_hash;

		mutex_lock(&ftrace_lock);
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
		/* Only poke the code when this ops is live and ftrace is on. */
		if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
		    && ftrace_enabled)
			ftrace_run_update_code(FTRACE_ENABLE_CALLS);

		mutex_unlock(&ftrace_lock);
	}
	/* NOTE(review): ret from ftrace_hash_move() is never propagated. */
	free_ftrace_hash(iter->hash);
	kfree(iter);

	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
3117
/* "available_filter_functions": read-only list of traceable functions. */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* "enabled_functions": read-only list of currently enabled call sites. */
static const struct file_operations ftrace_enabled_fops = {
	.open = ftrace_enabled_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* "set_ftrace_filter": read/write access to the filter hash. */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

/* "set_ftrace_notrace": read/write access to the notrace hash. */
static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
3147
3148 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3149
3150 static DEFINE_MUTEX(graph_lock);
3151
3152 int ftrace_graph_count;
3153 int ftrace_graph_filter_enabled;
3154 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3155
3156 static void *
3157 __g_next(struct seq_file *m, loff_t *pos)
3158 {
3159         if (*pos >= ftrace_graph_count)
3160                 return NULL;
3161         return &ftrace_graph_funcs[*pos];
3162 }
3163
3164 static void *
3165 g_next(struct seq_file *m, void *v, loff_t *pos)
3166 {
3167         (*pos)++;
3168         return __g_next(m, pos);
3169 }
3170
/*
 * Start a seq_file walk under graph_lock. With no filter installed,
 * hand back the (void *)1 sentinel so g_show() prints the "all
 * functions enabled" banner instead of an entry.
 */
static void *g_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_filter_enabled && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}

/* Pairs with g_start() for the duration of the whole walk. */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
3186
/* Emit one line: either the sentinel banner or a saved function name. */
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *entry = v;

	if (!entry)
		return 0;

	if (entry == (unsigned long *)1)
		seq_printf(m, "#### all functions enabled ####\n");
	else
		seq_printf(m, "%ps\n", (void *)*entry);

	return 0;
}
3203
/* seq_file operations backing "set_graph_function" reads. */
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
3210
/*
 * Open "set_graph_function". O_TRUNC writers clear the current filter;
 * readers get a seq_file walk over the saved entries.
 */
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		ftrace_graph_filter_enabled = 0;
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_graph_seq_ops);

	return ret;
}

/* Tear down the seq_file state allocated for readers in open. */
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);
	return 0;
}
3241
/*
 * Add (or, with a '!' prefix, remove) functions matching @buffer to the
 * graph filter @array of at most FTRACE_GRAPH_MAX_FUNCS entries; *@idx is
 * the current count. Returns 0 on success, -EBUSY when the array is full,
 * -EINVAL when nothing matched, -ENODEV when ftrace is disabled.
 */
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled)) {
		mutex_unlock(&ftrace_lock);
		return -ENODEV;
	}

	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FREE)
			continue;

		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				/* Adding: append only when not already present. */
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
						goto out;
				}
			} else {
				/* Removing: swap the last entry into slot i. */
				if (exists) {
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	ftrace_graph_filter_enabled = 1;
	return 0;
}
3308
/* Write handler for "set_graph_function": parse and apply one expression. */
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		/* NUL-terminate what trace_get_user() collected. */
		parser.buffer[parser.idx] = 0;

		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
					parser.buffer);
		if (ret)
			goto out_free;
	}

	ret = read;

out_free:
	trace_parser_put(&parser);
out_unlock:
	mutex_unlock(&graph_lock);

	return ret;
}

/* "set_graph_function" file operations. */
static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.release	= ftrace_graph_release,
	.llseek		= seq_lseek,
};
3355 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3356
3357 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3358 {
3359
3360         trace_create_file("available_filter_functions", 0444,
3361                         d_tracer, NULL, &ftrace_avail_fops);
3362
3363         trace_create_file("enabled_functions", 0444,
3364                         d_tracer, NULL, &ftrace_enabled_fops);
3365
3366         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3367                         NULL, &ftrace_filter_fops);
3368
3369         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3370                                     NULL, &ftrace_notrace_fops);
3371
3372 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3373         trace_create_file("set_graph_function", 0444, d_tracer,
3374                                     NULL,
3375                                     &ftrace_graph_fops);
3376 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3377
3378         return 0;
3379 }
3380
/*
 * Record every mcount call site in [@start, @end) and convert it to a nop.
 * @mod is NULL for the core kernel at boot, non-NULL for a loading module.
 */
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags = 0; /* Shut up gcc */

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
	if (!mod)
		local_irq_save(flags);
	ftrace_update_code(mod);
	if (!mod)
		local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}
3421
3422 #ifdef CONFIG_MODULES
/*
 * Throw away every dyn_ftrace record that points into @mod's core text.
 * Called on MODULE_STATE_GOING, before the module memory goes away.
 */
void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	mutex_lock(&ftrace_lock);

	if (ftrace_disabled)
		goto out_unlock;

	do_for_each_ftrace_rec(pg, rec) {
		if (within_module_core(rec->ip, mod)) {
			/*
			 * rec->ip is changed in ftrace_free_rec()
			 * It should not between s and e if record was freed.
			 */
			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
			ftrace_free_rec(rec);
		}
	} while_for_each_ftrace_rec();
 out_unlock:
	mutex_unlock(&ftrace_lock);
}
3446
3447 static void ftrace_init_module(struct module *mod,
3448                                unsigned long *start, unsigned long *end)
3449 {
3450         if (ftrace_disabled || start == end)
3451                 return;
3452         ftrace_process_locs(mod, start, end);
3453 }
3454
3455 static int ftrace_module_notify(struct notifier_block *self,
3456                                 unsigned long val, void *data)
3457 {
3458         struct module *mod = data;
3459
3460         switch (val) {
3461         case MODULE_STATE_COMING:
3462                 ftrace_init_module(mod, mod->ftrace_callsites,
3463                                    mod->ftrace_callsites +
3464                                    mod->num_ftrace_callsites);
3465                 break;
3466         case MODULE_STATE_GOING:
3467                 ftrace_release_mod(mod);
3468                 break;
3469         }
3470
3471         return 0;
3472 }
#else
/* !CONFIG_MODULES: module events need no handling. */
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

/* Registered in ftrace_init() to track module load/unload. */
struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
	.priority = 0,
};
3485
3486 extern unsigned long __start_mcount_loc[];
3487 extern unsigned long __stop_mcount_loc[];
3488
/*
 * Boot-time initialization of dynamic ftrace: convert all compiled-in
 * mcount call sites to nops and hook module load/unload. On any failure
 * ftrace is disabled for good.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/* NOTE(review): this ret is overwritten below without being checked. */
	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
3527
3528 #else
3529
/* !CONFIG_DYNAMIC_FTRACE fallbacks: no code patching, minimal stubs. */
static struct ftrace_ops global_ops = {
	.func			= ftrace_stub,
};

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)			\
	({						\
		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
		0;					\
	})
# define ftrace_shutdown(ops, command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)

/* Without dynamic ftrace every registered ops traces every function. */
static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
{
	return 1;
}
3558
3559 #endif /* CONFIG_DYNAMIC_FTRACE */
3560
3561 static void
3562 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
3563 {
3564         struct ftrace_ops *op;
3565
3566         if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
3567                 return;
3568
3569         trace_recursion_set(TRACE_INTERNAL_BIT);
3570         /*
3571          * Some of the ops may be dynamically allocated,
3572          * they must be freed after a synchronize_sched().
3573          */
3574         preempt_disable_notrace();
3575         op = rcu_dereference_raw(ftrace_ops_list);
3576         while (op != &ftrace_list_end) {
3577                 if (ftrace_ops_test(op, ip))
3578                         op->func(ip, parent_ip);
3579                 op = rcu_dereference_raw(op->next);
3580         };
3581         preempt_enable_notrace();
3582         trace_recursion_clear(TRACE_INTERNAL_BIT);
3583 }
3584
3585 static void clear_ftrace_swapper(void)
3586 {
3587         struct task_struct *p;
3588         int cpu;
3589
3590         get_online_cpus();
3591         for_each_online_cpu(cpu) {
3592                 p = idle_task(cpu);
3593                 clear_tsk_trace_trace(p);
3594         }
3595         put_online_cpus();
3596 }
3597
3598 static void set_ftrace_swapper(void)
3599 {
3600         struct task_struct *p;
3601         int cpu;
3602
3603         get_online_cpus();
3604         for_each_online_cpu(cpu) {
3605                 p = idle_task(cpu);
3606                 set_tsk_trace_trace(p);
3607         }
3608         put_online_cpus();
3609 }
3610
/* Clear the per-task trace flag for every task in @pid, then drop the ref. */
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

/*
 * Set the per-task trace flag for every task in @pid. Unlike
 * clear_ftrace_pid() this keeps the reference: the pid stays on
 * ftrace_pids until ftrace_pid_reset().
 */
static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}
3634
3635 static void clear_ftrace_pid_task(struct pid *pid)
3636 {
3637         if (pid == ftrace_swapper_pid)
3638                 clear_ftrace_swapper();
3639         else
3640                 clear_ftrace_pid(pid);
3641 }
3642
3643 static void set_ftrace_pid_task(struct pid *pid)
3644 {
3645         if (pid == ftrace_swapper_pid)
3646                 set_ftrace_swapper();
3647         else
3648                 set_ftrace_pid(pid);
3649 }
3650
/*
 * Add pid @p to the set of traced pids. p == 0 selects the idle tasks
 * via the ftrace_swapper_pid sentinel. Duplicates are silently accepted.
 * Returns 0 on success or a negative errno.
 */
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);

	if (!pid)
		goto out;

	ret = 0;

	/* Already being traced? Drop the extra reference and succeed. */
	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
3698
/* Remove every pid filter, clearing the task flags and refs as we go. */
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		/* Drops the pid reference too (see clear_ftrace_pid()). */
		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
}
3718
/*
 * Begin a walk of ftrace_pids under ftrace_lock. An empty list at
 * position 0 yields the (void *)1 sentinel so fpid_show() prints
 * "no pid".
 */
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	/* The "no pid" sentinel has no successor. */
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}

/* Pairs with fpid_start() for the duration of the whole walk. */
static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
3741
3742 static int fpid_show(struct seq_file *m, void *v)
3743 {
3744         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3745
3746         if (v == (void *)1) {
3747                 seq_printf(m, "no pid\n");
3748                 return 0;
3749         }
3750
3751         if (fpid->pid == ftrace_swapper_pid)
3752                 seq_printf(m, "swapper tasks\n");
3753         else
3754                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3755
3756         return 0;
3757 }
3758
/* seq_file operations backing "set_ftrace_pid" reads. */
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
3765
/*
 * Open "set_ftrace_pid". O_TRUNC writers drop all existing pid filters;
 * readers get a seq_file walk of ftrace_pids.
 */
static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset();

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_pid_sops);

	return ret;
}
3780
3781 static ssize_t
3782 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3783                    size_t cnt, loff_t *ppos)
3784 {
3785         char buf[64], *tmp;
3786         long val;
3787         int ret;
3788
3789         if (cnt >= sizeof(buf))
3790                 return -EINVAL;
3791
3792         if (copy_from_user(&buf, ubuf, cnt))
3793                 return -EFAULT;
3794
3795         buf[cnt] = 0;
3796
3797         /*
3798          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3799          * to clean the filter quietly.
3800          */
3801         tmp = strstrip(buf);
3802         if (strlen(tmp) == 0)
3803                 return 1;
3804
3805         ret = strict_strtol(tmp, 10, &val);
3806         if (ret < 0)
3807                 return ret;
3808
3809         ret = ftrace_pid_add(val);
3810
3811         return ret ? ret : cnt;
3812 }
3813
/* Release "set_ftrace_pid": readers allocated seq_file state in open. */
static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	return 0;
}

/* "set_ftrace_pid" file operations. */
static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ftrace_pid_release,
};
3830
/* Create the ftrace debugfs files once the tracing directory exists. */
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			    NULL, &ftrace_pid_fops);

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);
3849
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: it only sets the disabled flags and
 * clears the trace function, bypassing the usual locking.
 *
 * NOTE(review): the old text said "use ftrace_kill" for the
 * non-atomic case, which was self-referential; for a clean
 * shutdown use unregister_ftrace_function() on each ops instead.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
3863
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	/* NOTE(review): -1 is not an errno; -ENODEV would be clearer. */
	int ret = -1;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out_unlock;

	ret = __register_ftrace_function(ops);
	if (!ret)
		ret = ftrace_startup(ops, 0);


 out_unlock:
	mutex_unlock(&ftrace_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	if (!ret)
		ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
3914
/*
 * Handler for the "ftrace_enabled" sysctl: flip the global function
 * tracing hook on or off when the value actually changes.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Reads, errors, and no-op writes need no further action. */
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end) {
			/* A single ops can be called directly; else fan out. */
			if (ftrace_ops_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_ops_list->func;
			else
				ftrace_trace_function = ftrace_ops_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
3957
3958 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3959
3960 static int ftrace_graph_active;
3961 static struct notifier_block ftrace_suspend_notifier;
3962
/* Default entry hook: ignore the record and report "do not trace". */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *unused)
{
	return 0;
}
3967
/*
 * The callbacks that hook a function: one fires on function entry and
 * one on return.  They default to stubs so they are always safe to
 * call; register_ftrace_graph() swaps in the real tracer functions.
 */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3972
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/*
	 * Pre-allocate one batch of return stacks before taking the
	 * tasklist lock; they are handed out to tasks below.
	 */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			/* Free only the [0, i) stacks allocated so far. */
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		/* Batch exhausted: caller must retry for remaining tasks. */
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	/* Release any stacks that were not assigned to a task. */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
4018
4019 static void
4020 ftrace_graph_probe_sched_switch(void *ignore,
4021                         struct task_struct *prev, struct task_struct *next)
4022 {
4023         unsigned long long timestamp;
4024         int index;
4025
4026         /*
4027          * Does the user want to count the time a function was asleep.
4028          * If so, do not update the time stamps.
4029          */
4030         if (trace_flags & TRACE_ITER_SLEEP_TIME)
4031                 return;
4032
4033         timestamp = trace_clock_local();
4034
4035         prev->ftrace_timestamp = timestamp;
4036
4037         /* only process tasks that we timestamped */
4038         if (!next->ftrace_timestamp)
4039                 return;
4040
4041         /*
4042          * Update all the counters in next to make up for the
4043          * time next was sleeping.
4044          */
4045         timestamp -= next->ftrace_timestamp;
4046
4047         for (index = next->curr_ret_stack; index >= 0; index--)
4048                 next->ret_stack[index].calltime += timestamp;
4049 }
4050
4051 /* Allocate a return stack for each task */
4052 static int start_graph_tracing(void)
4053 {
4054         struct ftrace_ret_stack **ret_stack_list;
4055         int ret, cpu;
4056
4057         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4058                                 sizeof(struct ftrace_ret_stack *),
4059                                 GFP_KERNEL);
4060
4061         if (!ret_stack_list)
4062                 return -ENOMEM;
4063
4064         /* The cpu_boot init_task->ret_stack will never be freed */
4065         for_each_online_cpu(cpu) {
4066                 if (!idle_task(cpu)->ret_stack)
4067                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4068         }
4069
4070         do {
4071                 ret = alloc_retstack_tasklist(ret_stack_list);
4072         } while (ret == -EAGAIN);
4073
4074         if (!ret) {
4075                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4076                 if (ret)
4077                         pr_info("ftrace_graph: Couldn't activate tracepoint"
4078                                 " probe to kernel_sched_switch\n");
4079         }
4080
4081         kfree(ret_stack_list);
4082         return ret;
4083 }
4084
4085 /*
4086  * Hibernation protection.
4087  * The state of the current task is too much unstable during
4088  * suspend/restore to disk. We want to protect against that.
4089  */
4090 static int
4091 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4092                                                         void *unused)
4093 {
4094         switch (state) {
4095         case PM_HIBERNATION_PREPARE:
4096                 pause_graph_tracing();
4097                 break;
4098
4099         case PM_POST_HIBERNATION:
4100                 unpause_graph_tracing();
4101                 break;
4102         }
4103         return NOTIFY_DONE;
4104 }
4105
4106 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4107                         trace_func_graph_ent_t entryfunc)
4108 {
4109         int ret = 0;
4110
4111         mutex_lock(&ftrace_lock);
4112
4113         /* we currently allow only one tracer registered at a time */
4114         if (ftrace_graph_active) {
4115                 ret = -EBUSY;
4116                 goto out;
4117         }
4118
4119         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4120         register_pm_notifier(&ftrace_suspend_notifier);
4121
4122         ftrace_graph_active++;
4123         ret = start_graph_tracing();
4124         if (ret) {
4125                 ftrace_graph_active--;
4126                 goto out;
4127         }
4128
4129         ftrace_graph_return = retfunc;
4130         ftrace_graph_entry = entryfunc;
4131
4132         ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4133
4134 out:
4135         mutex_unlock(&ftrace_lock);
4136         return ret;
4137 }
4138
/*
 * Tear down function graph tracing: reset the entry/return hooks to
 * their stubs, shut the tracer down, and drop the PM notifier and the
 * sched_switch probe.  A no-op when no graph tracer is active.
 */
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	/* Point the hooks back at the stubs before stopping the machinery. */
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}
4156
4157 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4158
/*
 * Attach @ret_stack to @t and reset its graph-tracing bookkeeping.
 * Callers set t->curr_ret_stack to -1 before calling this; the write
 * barrier orders that store ahead of publishing t->ret_stack, which is
 * what marks the task as traceable.
 */
static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}
4169
/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 *
 * The idle task's stack is cached in the per-cpu idle_ret_stack so a
 * CPU that goes offline and comes back online reuses the same buffer.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			/* Allocation failure just leaves the task untraced. */
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}
4199
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		/* On allocation failure the task simply is not graph traced. */
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}
4218
/* Release a task's graph-tracer return stack when the task exits. */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
4229
/*
 * Hard-stop tracing on behalf of the graph tracer.  Thin wrapper
 * around ftrace_stop(); presumably kept as a separate symbol so
 * graph-tracer code paths outside this file can call it — TODO confirm
 * against callers.
 */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
4234 #endif