ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
[linux-2.6.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/list.h>
29 #include <linux/hash.h>
30 #include <linux/rcupdate.h>
31
32 #include <trace/events/sched.h>
33
34 #include <asm/ftrace.h>
35 #include <asm/setup.h>
36
37 #include "trace_output.h"
38 #include "trace_stat.h"
39
/*
 * If an anomaly is detected while ftrace is modifying code, warn and
 * then shut all of ftrace down (ftrace_kill()) rather than keep running
 * with potentially half-patched call sites.
 */
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

/* Same as FTRACE_WARN_ON() but only warns the first time it triggers */
#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
55
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
/* previous ftrace_enabled value, compared on sysctl updates */
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* serializes registration/unregistration of ftrace_ops and related state */
static DEFINE_MUTEX(ftrace_lock);

/* sentinel that terminates the ftrace_ops list; its func is a no-op stub */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func		= ftrace_stub,
};

/* head of the registered ftrace_ops list (published with rcu_assign_pointer) */
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
/* the function mcount call sites actually invoke; ftrace_stub when off */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
/* real trace function when ftrace_test_stop_func() wraps it (see below) */
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
/* function that ftrace_pid_func() forwards to once the pid filter passes */
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
#endif
91
92 /*
93  * Traverse the ftrace_list, invoking all entries.  The reason that we
94  * can use rcu_dereference_raw() is that elements removed from this list
95  * are simply leaked, so there is no need to interact with a grace-period
96  * mechanism.  The rcu_dereference_raw() calls are needed to handle
97  * concurrent insertions into the ftrace_list.
98  *
99  * Silly Alpha and silly pointer-speculation compiler optimizations!
100  */
101 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
102 {
103         struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
104
105         while (op != &ftrace_list_end) {
106                 op->func(ip, parent_ip);
107                 op = rcu_dereference_raw(op->next); /*see above*/
108         };
109 }
110
111 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
112 {
113         if (!test_tsk_trace_trace(current))
114                 return;
115
116         ftrace_pid_function(ip, parent_ip);
117 }
118
/*
 * Record @func as the target that ftrace_pid_func() forwards to.
 * Assigning ftrace_pid_func itself would make the pid wrapper call
 * itself forever, hence the guard.
 */
static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
125
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag before all mcount call sites
 * stop invoking the previous function pointers.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
138
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	/* honor the quick-disable flag before doing any tracing work */
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
152
/*
 * Add @ops to the head of the ftrace_list and retarget the function
 * that mcount call sites invoke.  The double-underscore name suggests
 * the caller serializes with ftrace_lock -- confirm at call sites.
 * Returns 0 (cannot currently fail).
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	rcu_assign_pointer(ftrace_list, ops);

	if (ftrace_enabled) {
		ftrace_func_t func;

		/* ops->next being the sentinel means ops is the only entry */
		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		/* pid filtering active: interpose the pid check */
		if (!list_empty(&ftrace_pids)) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		/* arch does not test function_trace_stop; wrap with C test */
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}
191
192 static int __unregister_ftrace_function(struct ftrace_ops *ops)
193 {
194         struct ftrace_ops **p;
195
196         /*
197          * If we are removing the last function, then simply point
198          * to the ftrace_stub.
199          */
200         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
201                 ftrace_trace_function = ftrace_stub;
202                 ftrace_list = &ftrace_list_end;
203                 return 0;
204         }
205
206         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
207                 if (*p == ops)
208                         break;
209
210         if (*p != ops)
211                 return -1;
212
213         *p = (*p)->next;
214
215         if (ftrace_enabled) {
216                 /* If we only have one func left, then call that directly */
217                 if (ftrace_list->next == &ftrace_list_end) {
218                         ftrace_func_t func = ftrace_list->func;
219
220                         if (!list_empty(&ftrace_pids)) {
221                                 set_ftrace_pid_function(func);
222                                 func = ftrace_pid_func;
223                         }
224 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
225                         ftrace_trace_function = func;
226 #else
227                         __ftrace_trace_function = func;
228 #endif
229                 }
230         }
231
232         return 0;
233 }
234
/*
 * Re-evaluate the pid filter: wrap the current trace function with
 * ftrace_pid_func() when pids are set, or unwrap it when the pid list
 * became empty.  No-op while tracing is stubbed out.
 */
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	/* the "real" function lives in a different variable per arch config */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	func = ftrace_trace_function;
#else
	func = __ftrace_trace_function;
#endif

	if (!list_empty(&ftrace_pids)) {
		/* install the pid wrapper in front of the current function */
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		/* pid list emptied: strip the wrapper, restore the target */
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}
262
#ifdef CONFIG_FUNCTION_PROFILER
/* one profile record per traced function */
struct ftrace_profile {
	struct hlist_node		node;	/* link in the per-cpu hash */
	unsigned long			ip;	/* function entry address */
	unsigned long			counter; /* number of hits */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;	/* accumulated run time */
#endif
};

/* page-sized container of profile records, linked into a list */
struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;	/* next free slot in records[] */
	struct ftrace_profile		records[];
};

/* per-cpu profiling state */
struct ftrace_profile_stat {
	atomic_t			disabled; /* recursion guard (NMIs) */
	struct hlist_head		*hash;	  /* ip -> record lookup */
	struct ftrace_profile_page	*pages;	  /* page with free slots */
	struct ftrace_profile_page	*start;	  /* first page of the list */
	struct tracer_stat		stat;	  /* stat-file registration */
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

/* hash shift derived once from FTRACE_PROFILE_HASH_SIZE */
static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
302
/*
 * Stat iterator: advance from record @v to the next used record,
 * crossing page boundaries as needed.  Returns NULL at the end.
 */
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	/*
	 * Pages come from get_zeroed_page() and are therefore page
	 * aligned: masking any record pointer with PAGE_MASK recovers
	 * its containing ftrace_profile_page header.
	 */
	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	/* idx == 0 means "start at v itself", do not step forward */
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		/* ran off the used part of this page; go to the next one */
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		/* skip a leading never-hit slot on the new page */
		if (!rec->counter)
			goto again;
	}

	return rec;
}
326
327 static void *function_stat_start(struct tracer_stat *trace)
328 {
329         struct ftrace_profile_stat *stat =
330                 container_of(trace, struct ftrace_profile_stat, stat);
331
332         if (!stat || !stat->start)
333                 return NULL;
334
335         return function_stat_next(&stat->start->records[0], 0);
336 }
337
338 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
339 /* function graph compares on total time */
340 static int function_stat_cmp(void *p1, void *p2)
341 {
342         struct ftrace_profile *a = p1;
343         struct ftrace_profile *b = p2;
344
345         if (a->time < b->time)
346                 return -1;
347         if (a->time > b->time)
348                 return 1;
349         else
350                 return 0;
351 }
352 #else
353 /* not function graph compares against hits */
354 static int function_stat_cmp(void *p1, void *p2)
355 {
356         struct ftrace_profile *a = p1;
357         struct ftrace_profile *b = p2;
358
359         if (a->counter < b->counter)
360                 return -1;
361         if (a->counter > b->counter)
362                 return 1;
363         else
364                 return 0;
365 }
366 #endif
367
/* Emit the column header line(s) for the function profile stat file. */
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* with graph tracing we also show total time and average */
	seq_printf(m, "  Function                               "
		   "Hit    Time            Avg\n"
		      "  --------                               "
		   "---    ----            ---\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}
381
382 static int function_stat_show(struct seq_file *m, void *v)
383 {
384         struct ftrace_profile *rec = v;
385         char str[KSYM_SYMBOL_LEN];
386 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
387         static DEFINE_MUTEX(mutex);
388         static struct trace_seq s;
389         unsigned long long avg;
390 #endif
391
392         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
393         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
394
395 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
396         seq_printf(m, "    ");
397         avg = rec->time;
398         do_div(avg, rec->counter);
399
400         mutex_lock(&mutex);
401         trace_seq_init(&s);
402         trace_print_graph_duration(rec->time, &s);
403         trace_seq_puts(&s, "    ");
404         trace_print_graph_duration(avg, &s);
405         trace_print_seq(m, &s);
406         mutex_unlock(&mutex);
407 #endif
408         seq_putc(m, '\n');
409
410         return 0;
411 }
412
413 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
414 {
415         struct ftrace_profile_page *pg;
416
417         pg = stat->pages = stat->start;
418
419         while (pg) {
420                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
421                 pg->index = 0;
422                 pg = pg->next;
423         }
424
425         memset(stat->hash, 0,
426                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
427 }
428
429 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
430 {
431         struct ftrace_profile_page *pg;
432         int functions;
433         int pages;
434         int i;
435
436         /* If we already allocated, do nothing */
437         if (stat->pages)
438                 return 0;
439
440         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
441         if (!stat->pages)
442                 return -ENOMEM;
443
444 #ifdef CONFIG_DYNAMIC_FTRACE
445         functions = ftrace_update_tot_cnt;
446 #else
447         /*
448          * We do not know the number of functions that exist because
449          * dynamic tracing is what counts them. With past experience
450          * we have around 20K functions. That should be more than enough.
451          * It is highly unlikely we will execute every function in
452          * the kernel.
453          */
454         functions = 20000;
455 #endif
456
457         pg = stat->start = stat->pages;
458
459         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
460
461         for (i = 0; i < pages; i++) {
462                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
463                 if (!pg->next)
464                         goto out_free;
465                 pg = pg->next;
466         }
467
468         return 0;
469
470  out_free:
471         pg = stat->start;
472         while (pg) {
473                 unsigned long tmp = (unsigned long)pg;
474
475                 pg = pg->next;
476                 free_page(tmp);
477         }
478
479         free_page((unsigned long)stat->pages);
480         stat->pages = NULL;
481         stat->start = NULL;
482
483         return -ENOMEM;
484 }
485
/*
 * Set up (or reset) the profiling state for one cpu: allocate the hash
 * table, derive the hash shift, and preallocate the record pages.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* compute log2(FTRACE_PROFILE_HASH_SIZE) once, for hash_long() */
	if (!ftrace_profile_bits) {
		size--;

		for (; size; size >>= 1)
			ftrace_profile_bits++;
	}

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}
526
527 static int ftrace_profile_init(void)
528 {
529         int cpu;
530         int ret = 0;
531
532         for_each_online_cpu(cpu) {
533                 ret = ftrace_profile_init_cpu(cpu);
534                 if (ret)
535                         break;
536         }
537
538         return ret;
539 }
540
/*
 * Look up the profile record for @ip in this cpu's hash.
 * Returns the record or NULL if @ip has not been profiled yet.
 * interrupts must be disabled (the hash is per-cpu and written
 * from the trace callback).
 */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	/* _rcu walk: concurrent hlist_add_head_rcu() insertions are safe */
	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}
563
564 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
565                                struct ftrace_profile *rec)
566 {
567         unsigned long key;
568
569         key = hash_long(rec->ip, ftrace_profile_bits);
570         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
571 }
572
/*
 * The memory is already allocated, this simply finds a new record to use.
 * Returns the record for @ip (possibly created by a racing NMI), or
 * NULL when recursing or when the preallocated pages are exhausted.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	/* current page full: move the cursor to the next preallocated one */
	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}
608
/*
 * Trace callback: bump the hit counter of the record for @ip,
 * allocating the record on first hit.  Runs with irqs disabled so the
 * per-cpu state stays consistent.
 */
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = &__get_cpu_var(ftrace_profile_stats);
	/* re-check enabled: it may have been cleared since the test above */
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
636
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* graph entry hook: count the hit; time is accounted on return */
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

/*
 * Graph return hook: accumulate this call's duration into its profile
 * record.  Unless TRACE_ITER_GRAPH_TIME is set, time spent in callees
 * is subtracted via the subtime bookkeeping on the return stack.
 */
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = &__get_cpu_var(ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		/* charge only time not already attributed to callees */
		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec)
		rec->time += calltime;

 out:
	local_irq_restore(flags);
}
680
/* with graph tracing, the profiler hooks both entry and return */
static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
/* without graph tracing, profile via a plain function-trace callback */
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
707
/*
 * Write handler for "function_profile_enabled": parse a 0/1 value and
 * start or stop the profiler accordingly.  Returns the byte count
 * consumed, or a negative errno.
 */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];		/* big enough to hold a number */
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	/* any non-zero value means "enable" */
	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	/* only act when the requested state differs from the current one */
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like an synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}
761
762 static ssize_t
763 ftrace_profile_read(struct file *filp, char __user *ubuf,
764                      size_t cnt, loff_t *ppos)
765 {
766         char buf[64];           /* big enough to hold a number */
767         int r;
768
769         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
770         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
771 }
772
/* file operations for the "function_profile_enabled" debugfs file */
static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
788
/*
 * Create the per-cpu "function<N>" stat files and the
 * "function_profile_enabled" control file under @d_tracer.
 * Failures only warn: these files are permanent, so nothing is
 * unwound here.
 */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		/* copy the template, then give each cpu its own name */
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
/* profiler compiled out: nothing to create */
static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
837
/* sentinel pid value meaning "trace the idle/swapper tasks" */
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/* hash of ips with attached function probes (see ftrace_func_probe) */
static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops *ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

/* command bits passed to the code-modification machinery */
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

/* non-zero when a filter restricts which functions get enabled */
static int ftrace_filtered;

/* list of records discovered since the last code update */
static struct dyn_ftrace *ftrace_new_addrs;

/* protects the filter/notrace regex files */
static DEFINE_MUTEX(ftrace_regex_lock);

/* page-sized container of dyn_ftrace records */
struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

/* singly linked free list of recycled dyn_ftrace records */
static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
903
/*
 * Return @rec to the free list and mark it FTRACE_FL_FREE so
 * ftrace_alloc_dyn_node() can sanity-check the list.
 * NOTE(review): field order looks deliberate (link first, flag last) --
 * dyn_ftrace fields may be unioned; do not reorder without checking.
 */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
910
/*
 * Hand out a dyn_ftrace record for @ip: reuse a freed record when
 * available, otherwise take the next slot in the current page
 * (allocating a new page when full).  Returns NULL on failure.
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		/* a list entry without FTRACE_FL_FREE means corruption */
		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
943
/*
 * Record a newly discovered mcount call site at @ip and queue it on
 * ftrace_new_addrs for the next code update pass.  Returns the new
 * record, or NULL if ftrace is disabled or allocation failed.
 */
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}
962
/*
 * Dump the MCOUNT_INSN_SIZE bytes at @p as colon-separated hex,
 * prefixed by @fmt, continuing the current printk line.
 */
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
972
/*
 * Report a code-modification failure at @ip.  Each errno from the
 * arch patching routines gets a tailored message; all paths warn once
 * and thereby shut ftrace down via FTRACE_WARN_ON_ONCE().
 */
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		/* the code at ip could not be read */
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		/* the bytes at ip did not match what was expected */
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		/* the code at ip could not be written */
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
999
1000
1001 /* Return 1 if the address range is reserved for ftrace */
1002 int ftrace_text_reserved(void *start, void *end)
1003 {
1004         struct dyn_ftrace *rec;
1005         struct ftrace_page *pg;
1006
1007         do_for_each_ftrace_rec(pg, rec) {
1008                 if (rec->ip <= (unsigned long)end &&
1009                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1010                         return 1;
1011         } while_for_each_ftrace_rec();
1012         return 0;
1013 }
1014
1015
/*
 * Compute the desired enabled-state of @rec from @enable and the
 * FILTER/NOTRACE bits, then patch the call site accordingly.
 * Returns 0 when the record is already in the desired state,
 * otherwise the result of ftrace_make_call()/ftrace_make_nop()
 * (0 on success, -errno on failure).
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long flag = 0UL;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If this record is not to be traced or we want to disable it,
	 * then disable it.
	 *
	 * If we want to enable it and filtering is off, then enable it.
	 *
	 * If we want to enable it and filtering is on, enable it only if
	 * it's filtered
	 */
	if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
		if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
			flag = FTRACE_FL_ENABLED;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return 0;

	if (flag) {
		rec->flags |= FTRACE_FL_ENABLED;
		/* patch the site to call the ftrace trampoline */
		return ftrace_make_call(rec, ftrace_addr);
	}

	rec->flags &= ~FTRACE_FL_ENABLED;
	/* patch the site back to a nop */
	return ftrace_make_nop(NULL, rec, ftrace_addr);
}
1050
/*
 * Walk every ftrace record and patch it to the state requested by
 * @enable. On the first patch failure the record is marked FAILED
 * and the walk is aborted.
 */
static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have
		 * failed and not converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_bug(failed, rec->ip);
			/* Stop processing */
			return;
		}
	} while_for_each_ftrace_rec();
}
1076
1077 static int
1078 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1079 {
1080         unsigned long ip;
1081         int ret;
1082
1083         ip = rec->ip;
1084
1085         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1086         if (ret) {
1087                 ftrace_bug(ret, ip);
1088                 rec->flags |= FTRACE_FL_FAILED;
1089                 return 0;
1090         }
1091         return 1;
1092 }
1093
1094 /*
1095  * archs can override this function if they must do something
1096  * before the modifying code is performed.
1097  */
1098 int __weak ftrace_arch_code_modify_prepare(void)
1099 {
1100         return 0;
1101 }
1102
1103 /*
1104  * archs can override this function if they must do something
1105  * after the modifying code is performed.
1106  */
1107 int __weak ftrace_arch_code_modify_post_process(void)
1108 {
1109         return 0;
1110 }
1111
/*
 * Apply the FTRACE_* command bits in @data. Invoked through
 * stop_machine() (see ftrace_run_update_code()) so kernel text can
 * be patched while no other CPU is executing it.
 */
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}
1131
/*
 * Run __ftrace_modify_code() under stop_machine(), bracketed by the
 * arch prepare/post hooks. If the prepare hook fails nothing is
 * modified and ftrace is killed via FTRACE_WARN_ON.
 */
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
1146
/* trace callback in effect the last time the code was updated */
static ftrace_func_t saved_ftrace_func;
/* count of ftrace_startup() calls not yet balanced by ftrace_shutdown() */
static int ftrace_start_up;
1149
1150 static void ftrace_startup_enable(int command)
1151 {
1152         if (saved_ftrace_func != ftrace_trace_function) {
1153                 saved_ftrace_func = ftrace_trace_function;
1154                 command |= FTRACE_UPDATE_TRACE_FUNC;
1155         }
1156
1157         if (!command || !ftrace_enabled)
1158                 return;
1159
1160         ftrace_run_update_code(command);
1161 }
1162
1163 static void ftrace_startup(int command)
1164 {
1165         if (unlikely(ftrace_disabled))
1166                 return;
1167
1168         ftrace_start_up++;
1169         command |= FTRACE_ENABLE_CALLS;
1170
1171         ftrace_startup_enable(command);
1172 }
1173
/*
 * Drop one ftrace user reference. When the last user goes away, all
 * call sites are patched back to nops. Also picks up a changed trace
 * callback, mirroring ftrace_startup_enable().
 */
static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	/*
	 * Just warn in case of unbalance, no need to kill ftrace, it's not
	 * critical but the ftrace_call callers may be never nopped again after
	 * further ftrace uses.
	 */
	WARN_ON_ONCE(ftrace_start_up < 0);

	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
1200
1201 static void ftrace_startup_sysctl(void)
1202 {
1203         int command = FTRACE_ENABLE_MCOUNT;
1204
1205         if (unlikely(ftrace_disabled))
1206                 return;
1207
1208         /* Force update next time */
1209         saved_ftrace_func = NULL;
1210         /* ftrace_start_up is true if we want ftrace running */
1211         if (ftrace_start_up)
1212                 command |= FTRACE_ENABLE_CALLS;
1213
1214         ftrace_run_update_code(command);
1215 }
1216
1217 static void ftrace_shutdown_sysctl(void)
1218 {
1219         int command = FTRACE_DISABLE_MCOUNT;
1220
1221         if (unlikely(ftrace_disabled))
1222                 return;
1223
1224         /* ftrace_start_up is true if ftrace is running */
1225         if (ftrace_start_up)
1226                 command |= FTRACE_DISABLE_CALLS;
1227
1228         ftrace_run_update_code(command);
1229 }
1230
static cycle_t		ftrace_update_time;	/* time spent in the last update */
static unsigned long	ftrace_update_cnt;	/* records converted last update */
unsigned long		ftrace_update_tot_cnt;	/* total records ever converted */
1234
/*
 * Convert all newly recorded mcount sites (the ftrace_new_addrs list)
 * to nops, enabling them right away if tracing is already running.
 * @mod is the module owning the records, or NULL for the core kernel.
 * Returns 0 on success, -1 if ftrace got disabled mid-way.
 */
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		/* pop the next record off the pending list */
		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;
		p->flags = 0L;

		/*
		 * Do the initial record conversion from mcount jump
		 * to the NOP instructions.
		 */
		if (!ftrace_code_disable(mod, p)) {
			ftrace_free_rec(p);
			continue;
		}

		p->flags |= FTRACE_FL_CONVERTED;
		ftrace_update_cnt++;

		/*
		 * If the tracing is enabled, go ahead and enable the record.
		 *
		 * The reason not to enable the record immediately is the
		 * inherent check of ftrace_make_nop/ftrace_make_call for
		 * correct previous instructions.  Making first the NOP
		 * conversion puts the module to the correct state, thus
		 * passing the ftrace_make_call check.
		 */
		if (ftrace_start_up) {
			int failed = __ftrace_replace_code(p, 1);
			if (failed) {
				ftrace_bug(failed, p->ip);
				ftrace_free_rec(p);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
1289
1290 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1291 {
1292         struct ftrace_page *pg;
1293         int cnt;
1294         int i;
1295
1296         /* allocate a few pages */
1297         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1298         if (!ftrace_pages_start)
1299                 return -1;
1300
1301         /*
1302          * Allocate a few more pages.
1303          *
1304          * TODO: have some parser search vmlinux before
1305          *   final linking to find all calls to ftrace.
1306          *   Then we can:
1307          *    a) know how many pages to allocate.
1308          *     and/or
1309          *    b) set up the table then.
1310          *
1311          *  The dynamic code is still necessary for
1312          *  modules.
1313          */
1314
1315         pg = ftrace_pages = ftrace_pages_start;
1316
1317         cnt = num_to_init / ENTRIES_PER_PAGE;
1318         pr_info("ftrace: allocating %ld entries in %d pages\n",
1319                 num_to_init, cnt + 1);
1320
1321         for (i = 0; i < cnt; i++) {
1322                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1323
1324                 /* If we fail, we'll try later anyway */
1325                 if (!pg->next)
1326                         break;
1327
1328                 pg = pg->next;
1329         }
1330
1331         return 0;
1332 }
1333
/* Mode bits for the record/probe seq_file iterator (ftrace_iterator.flags) */
enum {
	FTRACE_ITER_FILTER	= (1 << 0),	/* show records with FL_FILTER */
	FTRACE_ITER_NOTRACE	= (1 << 1),	/* show records with FL_NOTRACE */
	FTRACE_ITER_FAILURES	= (1 << 2),	/* show records with FL_FAILED */
	FTRACE_ITER_PRINTALL	= (1 << 3),	/* print "all functions enabled" */
	FTRACE_ITER_HASH	= (1 << 4),	/* iterate the probe hash */
};
1341
1342 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1343
/* Per-open state for the seq_file iterators over records and probes */
struct ftrace_iterator {
	struct ftrace_page	*pg;	/* current record page */
	int			hidx;	/* current probe-hash bucket */
	int			idx;	/* index within pg->records */
	unsigned		flags;	/* FTRACE_ITER_* mode bits */
	struct trace_parser	parser;	/* parser state for written input */
};
1351
1352 static void *
1353 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1354 {
1355         struct ftrace_iterator *iter = m->private;
1356         struct hlist_node *hnd = v;
1357         struct hlist_head *hhd;
1358
1359         WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1360
1361         (*pos)++;
1362
1363  retry:
1364         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1365                 return NULL;
1366
1367         hhd = &ftrace_func_hash[iter->hidx];
1368
1369         if (hlist_empty(hhd)) {
1370                 iter->hidx++;
1371                 hnd = NULL;
1372                 goto retry;
1373         }
1374
1375         if (!hnd)
1376                 hnd = hhd->first;
1377         else {
1378                 hnd = hnd->next;
1379                 if (!hnd) {
1380                         iter->hidx++;
1381                         goto retry;
1382                 }
1383         }
1384
1385         return hnd;
1386 }
1387
1388 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1389 {
1390         struct ftrace_iterator *iter = m->private;
1391         void *p = NULL;
1392         loff_t l;
1393
1394         if (!(iter->flags & FTRACE_ITER_HASH))
1395                 *pos = 0;
1396
1397         iter->flags |= FTRACE_ITER_HASH;
1398
1399         iter->hidx = 0;
1400         for (l = 0; l <= *pos; ) {
1401                 p = t_hash_next(m, p, &l);
1402                 if (!p)
1403                         break;
1404         }
1405         return p;
1406 }
1407
1408 static int t_hash_show(struct seq_file *m, void *v)
1409 {
1410         struct ftrace_func_probe *rec;
1411         struct hlist_node *hnd = v;
1412
1413         rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1414
1415         if (rec->ops->print)
1416                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1417
1418         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1419
1420         if (rec->data)
1421                 seq_printf(m, ":%p", rec->data);
1422         seq_putc(m, '\n');
1423
1424         return 0;
1425 }
1426
/*
 * seq_file ->next for the record-listing files. Walks the record
 * pages, skipping records that don't belong in the current view
 * (normal/failures/filter/notrace), and falls through to the probe
 * hash when in hash mode.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	/* the "all functions enabled" view emits a single line only */
	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			/* current page exhausted; move to the next one */
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		/*
		 * Skip free records, and records whose FAILED/FILTER/
		 * NOTRACE bits do not match what this view shows.
		 */
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}
1470
/*
 * seq_file ->start: takes ftrace_lock (released in t_stop()) and
 * positions the iterator at *pos. Falls through to the probe hash
 * once the records are exhausted so set_ftrace_filter shows both.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	/* rescan from the first page up to the requested position */
	iter->pg = ftrace_pages_start;
	iter->idx = 0;
	for (l = 0; l <= *pos; ) {
		p = t_next(m, p, &l);
		if (!p)
			break;
	}

	if (!p && iter->flags & FTRACE_ITER_FILTER)
		return t_hash_start(m, pos);

	return p;
}
1506
/* seq_file ->stop: drop the lock taken in t_start() */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
1511
1512 static int t_show(struct seq_file *m, void *v)
1513 {
1514         struct ftrace_iterator *iter = m->private;
1515         struct dyn_ftrace *rec = v;
1516
1517         if (iter->flags & FTRACE_ITER_HASH)
1518                 return t_hash_show(m, v);
1519
1520         if (iter->flags & FTRACE_ITER_PRINTALL) {
1521                 seq_printf(m, "#### all functions enabled ####\n");
1522                 return 0;
1523         }
1524
1525         if (!rec)
1526                 return 0;
1527
1528         seq_printf(m, "%ps\n", (void *)rec->ip);
1529
1530         return 0;
1531 }
1532
/* seq_file operations shared by the record-listing debugfs files */
static const struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
1539
1540 static int
1541 ftrace_avail_open(struct inode *inode, struct file *file)
1542 {
1543         struct ftrace_iterator *iter;
1544         int ret;
1545
1546         if (unlikely(ftrace_disabled))
1547                 return -ENODEV;
1548
1549         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1550         if (!iter)
1551                 return -ENOMEM;
1552
1553         iter->pg = ftrace_pages_start;
1554
1555         ret = seq_open(file, &show_ftrace_seq_ops);
1556         if (!ret) {
1557                 struct seq_file *m = file->private_data;
1558
1559                 m->private = iter;
1560         } else {
1561                 kfree(iter);
1562         }
1563
1564         return ret;
1565 }
1566
1567 static int
1568 ftrace_failures_open(struct inode *inode, struct file *file)
1569 {
1570         int ret;
1571         struct seq_file *m;
1572         struct ftrace_iterator *iter;
1573
1574         ret = ftrace_avail_open(inode, file);
1575         if (!ret) {
1576                 m = (struct seq_file *)file->private_data;
1577                 iter = (struct ftrace_iterator *)m->private;
1578                 iter->flags = FTRACE_ITER_FAILURES;
1579         }
1580
1581         return ret;
1582 }
1583
1584
1585 static void ftrace_filter_reset(int enable)
1586 {
1587         struct ftrace_page *pg;
1588         struct dyn_ftrace *rec;
1589         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1590
1591         mutex_lock(&ftrace_lock);
1592         if (enable)
1593                 ftrace_filtered = 0;
1594         do_for_each_ftrace_rec(pg, rec) {
1595                 if (rec->flags & FTRACE_FL_FAILED)
1596                         continue;
1597                 rec->flags &= ~type;
1598         } while_for_each_ftrace_rec();
1599         mutex_unlock(&ftrace_lock);
1600 }
1601
/*
 * Common open for set_ftrace_filter (@enable == 1) and
 * set_ftrace_notrace (@enable == 0). Writers opening with O_TRUNC
 * get the current filter cleared; readers are wired up to the
 * record seq_file iterator. Returns 0 or -errno.
 */
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
		kfree(iter);
		return -ENOMEM;
	}

	mutex_lock(&ftrace_regex_lock);
	/* a truncating open wipes the existing filter/notrace bits */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			/* seq_open failed: undo the allocations */
			trace_parser_put(&iter->parser);
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
1644
/* Open entry point for set_ftrace_filter. */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
1650
/* Open entry point for set_ftrace_notrace. */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1656
1657 static loff_t
1658 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1659 {
1660         loff_t ret;
1661
1662         if (file->f_mode & FMODE_READ)
1663                 ret = seq_lseek(file, offset, origin);
1664         else
1665                 file->f_pos = ret = 1;
1666
1667         return ret;
1668 }
1669
1670 static int ftrace_match(char *str, char *regex, int len, int type)
1671 {
1672         int matched = 0;
1673         int slen;
1674
1675         switch (type) {
1676         case MATCH_FULL:
1677                 if (strcmp(str, regex) == 0)
1678                         matched = 1;
1679                 break;
1680         case MATCH_FRONT_ONLY:
1681                 if (strncmp(str, regex, len) == 0)
1682                         matched = 1;
1683                 break;
1684         case MATCH_MIDDLE_ONLY:
1685                 if (strstr(str, regex))
1686                         matched = 1;
1687                 break;
1688         case MATCH_END_ONLY:
1689                 slen = strlen(str);
1690                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
1691                         matched = 1;
1692                 break;
1693         }
1694
1695         return matched;
1696 }
1697
1698 static int
1699 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1700 {
1701         char str[KSYM_SYMBOL_LEN];
1702
1703         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1704         return ftrace_match(str, regex, len, type);
1705 }
1706
/*
 * Set (or, with a '!' regex, clear) the FILTER/NOTRACE bit on every
 * record whose symbol matches @buff. Returns 1 if at least one
 * record matched, 0 otherwise.
 */
static int ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;
	int found = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = filter_parse_regex(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
			found = 1;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return found;
}
1747
1748 static int
1749 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1750                            char *regex, int len, int type)
1751 {
1752         char str[KSYM_SYMBOL_LEN];
1753         char *modname;
1754
1755         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1756
1757         if (!modname || strcmp(modname, mod))
1758                 return 0;
1759
1760         /* blank search means to match all funcs in the mod */
1761         if (len)
1762                 return ftrace_match(str, regex, len, type);
1763         else
1764                 return 1;
1765 }
1766
/*
 * Set or clear the FILTER/NOTRACE bit on every record belonging to
 * module @mod whose symbol matches @buff. A blank or "*" @buff
 * matches all functions in the module; a leading '!' clears the bit
 * instead of setting it. Returns 1 if any record matched.
 */
static int ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;
	int found = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = filter_parse_regex(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
			found = 1;
		}
		/* filtering is only active once a function is filtered on */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);

	return found;
}
1817
1818 /*
1819  * We register the module command as a template to show others how
1820  * to register the a command as well.
1821  */
1822
1823 static int
1824 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1825 {
1826         char *mod;
1827
1828         /*
1829          * cmd == 'mod' because we only registered this func
1830          * for the 'mod' ftrace_func_command.
1831          * But if you register one func with multiple commands,
1832          * you can tell which command was used by the cmd
1833          * parameter.
1834          */
1835
1836         /* we must have a module name */
1837         if (!param)
1838                 return -EINVAL;
1839
1840         mod = strsep(&param, ":");
1841         if (!strlen(mod))
1842                 return -EINVAL;
1843
1844         if (ftrace_match_module_records(func, mod, enable))
1845                 return 0;
1846         return -EINVAL;
1847 }
1848
/* The "mod" filter command, handled by ftrace_mod_callback() */
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};
1853
/* Register the "mod" command with the filter command parser at boot. */
static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
1859
/*
 * The ftrace callback used while function probes are registered:
 * look up @ip in ftrace_func_hash and invoke the handler of every
 * probe registered for this address.
 */
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	/* fast path: nothing registered for this bucket */
	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}
1888
/* ftrace_ops that dispatches to the registered function probes */
static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

/* set once trace_probe_ops has been registered with ftrace */
static int ftrace_probe_registered;
1895
1896 static void __enable_ftrace_function_probe(void)
1897 {
1898         int i;
1899
1900         if (ftrace_probe_registered)
1901                 return;
1902
1903         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1904                 struct hlist_head *hhd = &ftrace_func_hash[i];
1905                 if (hhd->first)
1906                         break;
1907         }
1908         /* Nothing registered? */
1909         if (i == FTRACE_FUNC_HASHSIZE)
1910                 return;
1911
1912         __register_ftrace_function(&trace_probe_ops);
1913         ftrace_startup(0);
1914         ftrace_probe_registered = 1;
1915 }
1916
1917 static void __disable_ftrace_function_probe(void)
1918 {
1919         int i;
1920
1921         if (!ftrace_probe_registered)
1922                 return;
1923
1924         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1925                 struct hlist_head *hhd = &ftrace_func_hash[i];
1926                 if (hhd->first)
1927                         return;
1928         }
1929
1930         /* no more funcs left */
1931         __unregister_ftrace_function(&trace_probe_ops);
1932         ftrace_shutdown(0);
1933         ftrace_probe_registered = 0;
1934 }
1935
1936
1937 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1938 {
1939         struct ftrace_func_probe *entry =
1940                 container_of(rhp, struct ftrace_func_probe, rcu);
1941
1942         if (entry->ops->free)
1943                 entry->ops->free(&entry->data);
1944         kfree(entry);
1945 }
1946
1947
/*
 * register_ftrace_function_probe - attach @ops to functions matching @glob
 *
 * Allocates a probe entry for every non-failed record whose symbol
 * matches @glob and hashes it so function_trace_probe_call() can
 * find it, then enables the probe machinery. Returns the number of
 * functions probed; -ENOMEM if no entry could be allocated at all;
 * -EINVAL for a '!' glob (negation is not supported here).
 */
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = filter_parse_regex(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		/* publish the entry so the probe callback can see it */
		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
2015
/* Selection flags for __unregister_ftrace_function_probe() */
enum {
	PROBE_TEST_FUNC		= 1,	/* entry must match the given ops */
	PROBE_TEST_DATA		= 2	/* entry must match the given data */
};
2020
/*
 * Remove function probe entries, filtered by @glob (NULL or "*" means all),
 * and by @ops/@data according to the PROBE_TEST_* bits in @flags.
 * Freed entries go through call_rcu() so concurrent hash readers stay safe.
 */
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	/* "*" or the empty string matches everything: drop the glob. */
	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = filter_parse_regex(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				/* Resolve ip to a symbol name for glob matching. */
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}
2073
2074 void
2075 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2076                                 void *data)
2077 {
2078         __unregister_ftrace_function_probe(glob, ops, data,
2079                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2080 }
2081
2082 void
2083 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2084 {
2085         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2086 }
2087
2088 void unregister_ftrace_function_probe_all(char *glob)
2089 {
2090         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2091 }
2092
/* Registered "func:command" handlers for set_ftrace_filter writes. */
static LIST_HEAD(ftrace_commands);
/* Serializes all access to ftrace_commands. */
static DEFINE_MUTEX(ftrace_cmd_mutex);
2095
2096 int register_ftrace_command(struct ftrace_func_command *cmd)
2097 {
2098         struct ftrace_func_command *p;
2099         int ret = 0;
2100
2101         mutex_lock(&ftrace_cmd_mutex);
2102         list_for_each_entry(p, &ftrace_commands, list) {
2103                 if (strcmp(cmd->name, p->name) == 0) {
2104                         ret = -EBUSY;
2105                         goto out_unlock;
2106                 }
2107         }
2108         list_add(&cmd->list, &ftrace_commands);
2109  out_unlock:
2110         mutex_unlock(&ftrace_cmd_mutex);
2111
2112         return ret;
2113 }
2114
2115 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2116 {
2117         struct ftrace_func_command *p, *n;
2118         int ret = -ENODEV;
2119
2120         mutex_lock(&ftrace_cmd_mutex);
2121         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2122                 if (strcmp(cmd->name, p->name) == 0) {
2123                         ret = 0;
2124                         list_del_init(&p->list);
2125                         goto out_unlock;
2126                 }
2127         }
2128  out_unlock:
2129         mutex_unlock(&ftrace_cmd_mutex);
2130
2131         return ret;
2132 }
2133
/*
 * Parse one "func" or "func:command[:arg]" expression written to
 * set_ftrace_filter / set_ftrace_notrace. A plain expression updates
 * the filter records; a colon form is dispatched to a registered command.
 * @enable selects filter (1) vs notrace (0) semantics.
 * Returns 0 on success, negative errno otherwise.
 */
static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	/* strsep() leaves next == NULL when no ':' was present. */
	func = strsep(&next, ":");

	if (!next) {
		if (ftrace_match_records(func, len, enable))
			return 0;
		/* no record matched the expression */
		return ret;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			/* next (possibly NULL) is the command's parameter */
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
2164
/*
 * Common write handler for set_ftrace_filter/set_ftrace_notrace.
 * Accumulates user input in the iterator's parser and processes each
 * completed expression. Returns bytes consumed or negative errno.
 */
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	struct trace_parser *parser;
	ssize_t ret, read;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	/*
	 * When opened readable the file went through seq_open(), so the
	 * iterator hangs off the seq_file; otherwise it is private_data.
	 */
	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	parser = &iter->parser;
	read = trace_get_user(parser, ubuf, cnt, ppos);

	/* Only act once a full (non-continued) expression is buffered. */
	if (read >= 0 && trace_parser_loaded(parser) &&
	    !trace_parser_cont(parser)) {
		ret = ftrace_process_regex(parser->buffer,
					   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret)
			goto out_unlock;
	}

	ret = read;
out_unlock:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
2202
2203 static ssize_t
2204 ftrace_filter_write(struct file *file, const char __user *ubuf,
2205                     size_t cnt, loff_t *ppos)
2206 {
2207         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2208 }
2209
2210 static ssize_t
2211 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2212                      size_t cnt, loff_t *ppos)
2213 {
2214         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2215 }
2216
/*
 * Apply a filter/notrace expression programmatically (kernel-internal
 * path used by ftrace_set_filter/ftrace_set_notrace and boot params).
 * @reset clears the existing list first; a NULL @buf with @reset set
 * therefore leaves everything enabled.
 */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
2230
2231 /**
2232  * ftrace_set_filter - set a function to filter on in ftrace
2233  * @buf - the string that holds the function filter text.
2234  * @len - the length of the string.
2235  * @reset - non zero to reset all filters before applying this filter.
2236  *
2237  * Filters denote which functions should be enabled when tracing is enabled.
2238  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2239  */
2240 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2241 {
2242         ftrace_set_regex(buf, len, reset, 1);
2243 }
2244
2245 /**
2246  * ftrace_set_notrace - set a function to not trace in ftrace
2247  * @buf - the string that holds the function notrace text.
2248  * @len - the length of the string.
2249  * @reset - non zero to reset all filters before applying this filter.
2250  *
2251  * Notrace Filters denote which functions should not be enabled when tracing
2252  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2253  * for tracing.
2254  */
2255 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2256 {
2257         ftrace_set_regex(buf, len, reset, 0);
2258 }
2259
2260 /*
2261  * command line interface to allow users to set filters on boot up.
2262  */
2263 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2264 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2265 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2266
2267 static int __init set_ftrace_notrace(char *str)
2268 {
2269         strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2270         return 1;
2271 }
2272 __setup("ftrace_notrace=", set_ftrace_notrace);
2273
2274 static int __init set_ftrace_filter(char *str)
2275 {
2276         strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2277         return 1;
2278 }
2279 __setup("ftrace_filter=", set_ftrace_filter);
2280
2281 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* "ftrace_graph_filter=" boot parameter storage, consumed after boot. */
static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static int __init set_graph_function(char *str)
{
	/* strlcpy() always NUL-terminates, even on truncation. */
	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_graph_filter=", set_graph_function);
2289
/*
 * Apply the boot-time graph filter: @buf is a comma-separated list of
 * function expressions, each fed to ftrace_set_func() individually.
 */
static void __init set_ftrace_early_graph(char *buf)
{
	int ret;
	char *func;

	while (buf) {
		func = strsep(&buf, ",");
		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
				      func);
		if (ret)
			printk(KERN_DEBUG "ftrace: function %s not "
					  "traceable\n", func);
	}
}
2305 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2306
2307 static void __init set_ftrace_early_filter(char *buf, int enable)
2308 {
2309         char *func;
2310
2311         while (buf) {
2312                 func = strsep(&buf, ",");
2313                 ftrace_set_regex(func, strlen(func), 0, enable);
2314         }
2315 }
2316
/*
 * Called from ftrace_init(): apply whatever the ftrace_filter=,
 * ftrace_notrace= and ftrace_graph_filter= boot parameters stored.
 */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(ftrace_notrace_buf, 0);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_buf[0])
		set_ftrace_early_graph(ftrace_graph_buf);
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
}
2328
/*
 * Common release handler for set_ftrace_filter/set_ftrace_notrace.
 * Flushes any expression still sitting in the parser, re-applies the
 * call sites if tracing is live, then tears down the iterator.
 */
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct trace_parser *parser;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	parser = &iter->parser;
	if (trace_parser_loaded(parser)) {
		/* Terminate and apply the final, unflushed expression. */
		parser->buffer[parser->idx] = 0;
		ftrace_match_records(parser->buffer, parser->idx, enable);
	}

	/* ftrace_lock nests inside ftrace_regex_lock here. */
	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	trace_parser_put(parser);
	kfree(iter);

	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
2361
/* Release for set_ftrace_filter (enable == 1). */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, /* enable */ 1);
}
2367
/* Release for set_ftrace_notrace (enable == 0). */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, /* enable */ 0);
}
2373
/* debugfs: available_filter_functions (read-only listing) */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* debugfs: failures (read-only listing of records that failed to convert) */
static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

/* debugfs: set_ftrace_filter (read/write) */
static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

/* debugfs: set_ftrace_notrace (read/write) */
static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
2403
2404 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2405
/* Protects the graph filter state below. */
static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;			/* number of entries in use */
int ftrace_graph_filter_enabled;	/* set once any filter is installed */
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2411
2412 static void *
2413 __g_next(struct seq_file *m, loff_t *pos)
2414 {
2415         if (*pos >= ftrace_graph_count)
2416                 return NULL;
2417         return &ftrace_graph_funcs[*pos];
2418 }
2419
2420 static void *
2421 g_next(struct seq_file *m, void *v, loff_t *pos)
2422 {
2423         (*pos)++;
2424         return __g_next(m, pos);
2425 }
2426
/*
 * seq_file .start: takes graph_lock (released in g_stop()).
 * Returns the sentinel (void *)1 when no filter is installed so g_show()
 * prints the "all functions enabled" banner.
 */
static void *g_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_filter_enabled && !*pos)
		return (void *)1;

	return __g_next(m, pos);
}
2437
/* seq_file .stop: drops the graph_lock taken in g_start(). */
static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
2442
/*
 * seq_file .show: print one filtered function, or the "all enabled"
 * banner when g_start() handed back the (void *)1 sentinel.
 */
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;

	if (!ptr)
		return 0;

	/* Sentinel from g_start(): no filter installed. */
	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	/* %ps resolves the stored ip to a symbol name. */
	seq_printf(m, "%ps\n", (void *)*ptr);

	return 0;
}
2459
/* seq_file iterator over the graph filter (set_graph_function reads). */
static const struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
2466
/*
 * Open handler for set_graph_function. An O_TRUNC writable open clears
 * the whole graph filter; a readable open starts a seq_file iteration.
 */
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		/* e.g. "echo > set_graph_function" resets the filter */
		ftrace_graph_filter_enabled = 0;
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}
	mutex_unlock(&graph_lock);

	if (file->f_mode & FMODE_READ)
		ret = seq_open(file, &ftrace_graph_seq_ops);

	return ret;
}
2489
2490 static int
2491 ftrace_graph_release(struct inode *inode, struct file *file)
2492 {
2493         if (file->f_mode & FMODE_READ)
2494                 seq_release(inode, file);
2495         return 0;
2496 }
2497
/*
 * Add (or, with a '!' prefix, remove) every function matching @buffer
 * to/from the graph filter @array of at most FTRACE_GRAPH_MAX_FUNCS
 * entries; *@idx tracks the count in use.
 * Returns 0 on success, -EINVAL if nothing matched, -EBUSY if the
 * array is already full, -ENODEV when ftrace is disabled.
 */
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int fail = 1;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
	/* an add into a full array cannot succeed; a removal still can */
	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
		return -EBUSY;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* if it is in the array */
			exists = false;
			for (i = 0; i < *idx; i++) {
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			}

			if (!not) {
				/* add: only if not already present */
				fail = 0;
				if (!exists) {
					array[(*idx)++] = rec->ip;
					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
						goto out;
				}
			} else {
				/* remove: swap the last entry into slot i */
				if (exists) {
					array[i] = array[--(*idx)];
					array[*idx] = 0;
					fail = 0;
				}
			}
		}
	} while_for_each_ftrace_rec();
out:
	mutex_unlock(&ftrace_lock);

	if (fail)
		return -EINVAL;

	ftrace_graph_filter_enabled = 1;
	return 0;
}
2561
/*
 * Write handler for set_graph_function: parse one expression from user
 * space and apply it to the graph filter. Caller must hold no locks;
 * graph_lock serializes against the seq_file reader and open().
 */
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		/* NUL-terminate what trace_get_user() buffered. */
		parser.buffer[parser.idx] = 0;

		/* we allow only one expression at a time */
		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
					parser.buffer);
		if (ret)
			goto out_free;
	}

	ret = read;

out_free:
	trace_parser_put(&parser);
out_unlock:
	mutex_unlock(&graph_lock);

	return ret;
}
2600
/*
 * debugfs: set_graph_function (read/write).
 * NOTE(review): no explicit .llseek here, unlike the other filter files
 * — presumably relies on the default; confirm intended seek behavior.
 */
static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.release	= ftrace_graph_release,
};
2607 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2608
2609 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2610 {
2611
2612         trace_create_file("available_filter_functions", 0444,
2613                         d_tracer, NULL, &ftrace_avail_fops);
2614
2615         trace_create_file("failures", 0444,
2616                         d_tracer, NULL, &ftrace_failures_fops);
2617
2618         trace_create_file("set_ftrace_filter", 0644, d_tracer,
2619                         NULL, &ftrace_filter_fops);
2620
2621         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
2622                                     NULL, &ftrace_notrace_fops);
2623
2624 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2625         trace_create_file("set_graph_function", 0444, d_tracer,
2626                                     NULL,
2627                                     &ftrace_graph_fops);
2628 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2629
2630         return 0;
2631 }
2632
/*
 * Record every mcount call site in the [start, end) table (core kernel
 * when @mod is NULL, or a loading module) and convert them to nops.
 * Always returns 0.
 */
static int ftrace_process_locs(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		/* p is advanced here, so the continue below cannot loop. */
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}
2664
2665 #ifdef CONFIG_MODULES
/*
 * Drop every ftrace record whose call site lives in @mod's core text.
 * Called when a module is unloaded so stale ips are never patched.
 */
void ftrace_release_mod(struct module *mod)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (ftrace_disabled)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (within_module_core(rec->ip, mod)) {
			/*
			 * rec->ip is changed in ftrace_free_rec()
			 * It should not between s and e if record was freed.
			 */
			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
			ftrace_free_rec(rec);
		}
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
2687
/*
 * Record and nop a loading module's mcount call sites, unless ftrace
 * is disabled or the module has no call sites at all.
 */
static void ftrace_init_module(struct module *mod,
			       unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_process_locs(mod, start, end);
}
2695
/*
 * Module notifier: patch in a module's call sites when it starts
 * loading, and release its records when it is torn down.
 */
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		ftrace_init_module(mod, mod->ftrace_callsites,
				   mod->ftrace_callsites +
				   mod->num_ftrace_callsites);
		break;
	case MODULE_STATE_GOING:
		ftrace_release_mod(mod);
		break;
	}

	return 0;
}
2714 #else
/* No-op notifier when the kernel is built without module support. */
static int ftrace_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	return 0;
}
2720 #endif /* CONFIG_MODULES */
2721
/* Registered in ftrace_init() to track module load/unload. */
struct notifier_block ftrace_module_nb = {
	.notifier_call = ftrace_module_notify,
	.priority = 0,
};
2726
/* Linker-provided bounds of the kernel's mcount_loc call-site table. */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
2729
/*
 * Boot-time initialization of dynamic ftrace: let the arch set up,
 * size and allocate the record table, convert all kernel call sites,
 * hook module load/unload, and apply any boot-parameter filters.
 * On failure, ftrace is disabled for good.
 */
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	/* NOTE(review): this return value is never checked (it is
	 * always 0 today) — revisit if ftrace_process_locs() can fail. */
	ret = ftrace_process_locs(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	ret = register_module_notifier(&ftrace_module_nb);
	if (ret)
		pr_warning("Failed to register trace ftrace module notifier\n");

	set_ftrace_early_filters();

	return;
 failed:
	ftrace_disabled = 1;
}
2768
2769 #else
2770
/* Without dynamic ftrace there is nothing to patch: just flip it on. */
static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

/* Stubs so callers compile unchanged when CONFIG_DYNAMIC_FTRACE=n. */
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
2785 #endif /* CONFIG_DYNAMIC_FTRACE */
2786
2787 static void clear_ftrace_swapper(void)
2788 {
2789         struct task_struct *p;
2790         int cpu;
2791
2792         get_online_cpus();
2793         for_each_online_cpu(cpu) {
2794                 p = idle_task(cpu);
2795                 clear_tsk_trace_trace(p);
2796         }
2797         put_online_cpus();
2798 }
2799
2800 static void set_ftrace_swapper(void)
2801 {
2802         struct task_struct *p;
2803         int cpu;
2804
2805         get_online_cpus();
2806         for_each_online_cpu(cpu) {
2807                 p = idle_task(cpu);
2808                 set_tsk_trace_trace(p);
2809         }
2810         put_online_cpus();
2811 }
2812
/*
 * Clear the trace flag on all tasks of @pid, then drop the reference
 * that ftrace_pid_add() took on it.
 */
static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}
2825
/*
 * Set the trace flag on all tasks of @pid. The pid reference is kept
 * (it is released later by clear_ftrace_pid()).
 */
static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}
2836
/* ftrace_swapper_pid is a sentinel meaning "the per-CPU idle tasks". */
static void clear_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(pid);
}
2844
/* ftrace_swapper_pid is a sentinel meaning "the per-CPU idle tasks". */
static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
2852
/*
 * Add pid @p to the set of traced pids (0 selects the swapper/idle
 * tasks). Duplicates are silently accepted. Returns 0 on success,
 * -EINVAL for an unknown pid, -ENOMEM on allocation failure.
 */
static int ftrace_pid_add(int p)
{
	struct pid *pid;
	struct ftrace_pid *fpid;
	int ret = -EINVAL;

	mutex_lock(&ftrace_lock);

	if (!p)
		pid = ftrace_swapper_pid;
	else
		pid = find_get_pid(p);	/* takes a pid reference */

	if (!pid)
		goto out;

	ret = 0;

	/* already registered? nothing to do */
	list_for_each_entry(fpid, &ftrace_pids, list)
		if (fpid->pid == pid)
			goto out_put;

	ret = -ENOMEM;

	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
	if (!fpid)
		goto out_put;

	list_add(&fpid->list, &ftrace_pids);
	fpid->pid = pid;

	set_ftrace_pid_task(pid);

	/* switch the tracer callback to the pid-filtering variant */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
	return 0;

out_put:
	/* the swapper sentinel carries no reference to drop */
	if (pid != ftrace_swapper_pid)
		put_pid(pid);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
2900
/*
 * Drop every registered traced pid (clearing the task flags and pid
 * references) and restore the unfiltered tracer callback.
 */
static void ftrace_pid_reset(void)
{
	struct ftrace_pid *fpid, *safe;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
		struct pid *pid = fpid->pid;

		clear_ftrace_pid_task(pid);

		list_del(&fpid->list);
		kfree(fpid);
	}

	ftrace_update_pid_func();
	ftrace_startup_enable(0);

	mutex_unlock(&ftrace_lock);
}
2920
/*
 * seq_file .start for set_ftrace_pid: takes ftrace_lock (released in
 * fpid_stop()); (void *)1 is the "no pid registered" sentinel.
 */
static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);
}
2930
/* seq_file .next: the (void *)1 sentinel terminates after one show. */
static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	if (v == (void *)1)
		return NULL;

	return seq_list_next(v, &ftrace_pids, pos);
}
2938
/* seq_file .stop: drops the ftrace_lock taken in fpid_start(). */
static void fpid_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
2943
2944 static int fpid_show(struct seq_file *m, void *v)
2945 {
2946         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
2947
2948         if (v == (void *)1) {
2949                 seq_printf(m, "no pid\n");
2950                 return 0;
2951         }
2952
2953         if (fpid->pid == ftrace_swapper_pid)
2954                 seq_printf(m, "swapper tasks\n");
2955         else
2956                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
2957
2958         return 0;
2959 }
2960
/* seq_file iterator over the registered traced pids. */
static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};
2967
2968 static int
2969 ftrace_pid_open(struct inode *inode, struct file *file)
2970 {
2971         int ret = 0;
2972
2973         if ((file->f_mode & FMODE_WRITE) &&
2974             (file->f_flags & O_TRUNC))
2975                 ftrace_pid_reset();
2976
2977         if (file->f_mode & FMODE_READ)
2978                 ret = seq_open(file, &ftrace_pid_sops);
2979
2980         return ret;
2981 }
2982
2983 static ssize_t
2984 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2985                    size_t cnt, loff_t *ppos)
2986 {
2987         char buf[64], *tmp;
2988         long val;
2989         int ret;
2990
2991         if (cnt >= sizeof(buf))
2992                 return -EINVAL;
2993
2994         if (copy_from_user(&buf, ubuf, cnt))
2995                 return -EFAULT;
2996
2997         buf[cnt] = 0;
2998
2999         /*
3000          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3001          * to clean the filter quietly.
3002          */
3003         tmp = strstrip(buf);
3004         if (strlen(tmp) == 0)
3005                 return 1;
3006
3007         ret = strict_strtol(tmp, 10, &val);
3008         if (ret < 0)
3009                 return ret;
3010
3011         ret = ftrace_pid_add(val);
3012
3013         return ret ? ret : cnt;
3014 }
3015
3016 static int
3017 ftrace_pid_release(struct inode *inode, struct file *file)
3018 {
3019         if (file->f_mode & FMODE_READ)
3020                 seq_release(inode, file);
3021
3022         return 0;
3023 }
3024
/* file_operations for the debugfs set_ftrace_pid control file */
static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ftrace_pid_release,
};
3032
3033 static __init int ftrace_init_debugfs(void)
3034 {
3035         struct dentry *d_tracer;
3036
3037         d_tracer = tracing_init_dentry();
3038         if (!d_tracer)
3039                 return 0;
3040
3041         ftrace_init_dyn_debugfs(d_tracer);
3042
3043         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3044                             NULL, &ftrace_pid_fops);
3045
3046         ftrace_profile_debugfs(d_tracer);
3047
3048         return 0;
3049 }
3050 fs_initcall(ftrace_init_debugfs);
3051
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way (no locking, no synchronization with
 * running tracers). If you need to simply stop ftrace from a
 * non-atomic section, use the normal unregister path (e.g.
 * unregister_ftrace_function()) instead.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
3065
3066 /**
3067  * register_ftrace_function - register a function for profiling
3068  * @ops - ops structure that holds the function for profiling.
3069  *
3070  * Register a function to be called by all functions in the
3071  * kernel.
3072  *
3073  * Note: @ops->func and all the functions it calls must be labeled
3074  *       with "notrace", otherwise it will go into a
3075  *       recursive loop.
3076  */
3077 int register_ftrace_function(struct ftrace_ops *ops)
3078 {
3079         int ret;
3080
3081         if (unlikely(ftrace_disabled))
3082                 return -1;
3083
3084         mutex_lock(&ftrace_lock);
3085
3086         ret = __register_ftrace_function(ops);
3087         ftrace_startup(0);
3088
3089         mutex_unlock(&ftrace_lock);
3090         return ret;
3091 }
3092
3093 /**
3094  * unregister_ftrace_function - unregister a function for profiling.
3095  * @ops - ops structure that holds the function to unregister
3096  *
3097  * Unregister a function that was added to be called by ftrace profiling.
3098  */
3099 int unregister_ftrace_function(struct ftrace_ops *ops)
3100 {
3101         int ret;
3102
3103         mutex_lock(&ftrace_lock);
3104         ret = __unregister_ftrace_function(ops);
3105         ftrace_shutdown(0);
3106         mutex_unlock(&ftrace_lock);
3107
3108         return ret;
3109 }
3110
/*
 * sysctl handler for /proc/sys/kernel/ftrace_enabled.  On a write
 * that actually flips the value, start or stop function tracing by
 * swapping ftrace_trace_function under ftrace_lock.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret  = proc_dointvec(table, write, buffer, lenp, ppos);

	/* nothing to do on read, on error, or when the value is unchanged */
	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/*
			 * A single registered ops gets its handler called
			 * directly; multiple ops go through the list walker.
			 */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
3153
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Non-zero while a graph tracer is registered (only one at a time) */
static int ftrace_graph_active;
/* PM notifier that pauses graph tracing across hibernation */
static struct notifier_block ftrace_suspend_notifier;
3158
/* Default entry hook: returns 0, i.e. do not trace this function */
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}
3163
/*
 * The callbacks that hook a function: the return hook defaults to
 * ftrace_stub (cast to the graph-return signature) and the entry
 * hook to the do-nothing stub above.
 */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3168
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	/* [start, end) indexes the stacks not yet handed to a task */
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	/* Pre-allocate one batch of return stacks outside tasklist_lock */
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			/* free everything allocated so far ([0, i)) */
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		/* Batch exhausted: caller retries with a fresh batch */
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			/* publishing ret_stack makes the task traceable */
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	/* release the stacks that were never assigned to a task */
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
3214
3215 static void
3216 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3217                                 struct task_struct *next)
3218 {
3219         unsigned long long timestamp;
3220         int index;
3221
3222         /*
3223          * Does the user want to count the time a function was asleep.
3224          * If so, do not update the time stamps.
3225          */
3226         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3227                 return;
3228
3229         timestamp = trace_clock_local();
3230
3231         prev->ftrace_timestamp = timestamp;
3232
3233         /* only process tasks that we timestamped */
3234         if (!next->ftrace_timestamp)
3235                 return;
3236
3237         /*
3238          * Update all the counters in next to make up for the
3239          * time next was sleeping.
3240          */
3241         timestamp -= next->ftrace_timestamp;
3242
3243         for (index = next->curr_ret_stack; index >= 0; index--)
3244                 next->ret_stack[index].calltime += timestamp;
3245 }
3246
3247 /* Allocate a return stack for each task */
3248 static int start_graph_tracing(void)
3249 {
3250         struct ftrace_ret_stack **ret_stack_list;
3251         int ret, cpu;
3252
3253         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3254                                 sizeof(struct ftrace_ret_stack *),
3255                                 GFP_KERNEL);
3256
3257         if (!ret_stack_list)
3258                 return -ENOMEM;
3259
3260         /* The cpu_boot init_task->ret_stack will never be freed */
3261         for_each_online_cpu(cpu) {
3262                 if (!idle_task(cpu)->ret_stack)
3263                         ftrace_graph_init_task(idle_task(cpu));
3264         }
3265
3266         do {
3267                 ret = alloc_retstack_tasklist(ret_stack_list);
3268         } while (ret == -EAGAIN);
3269
3270         if (!ret) {
3271                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3272                 if (ret)
3273                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3274                                 " probe to kernel_sched_switch\n");
3275         }
3276
3277         kfree(ret_stack_list);
3278         return ret;
3279 }
3280
3281 /*
3282  * Hibernation protection.
3283  * The state of the current task is too much unstable during
3284  * suspend/restore to disk. We want to protect against that.
3285  */
3286 static int
3287 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3288                                                         void *unused)
3289 {
3290         switch (state) {
3291         case PM_HIBERNATION_PREPARE:
3292                 pause_graph_tracing();
3293                 break;
3294
3295         case PM_POST_HIBERNATION:
3296                 unpause_graph_tracing();
3297                 break;
3298         }
3299         return NOTIFY_DONE;
3300 }
3301
3302 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3303                         trace_func_graph_ent_t entryfunc)
3304 {
3305         int ret = 0;
3306
3307         mutex_lock(&ftrace_lock);
3308
3309         /* we currently allow only one tracer registered at a time */
3310         if (ftrace_graph_active) {
3311                 ret = -EBUSY;
3312                 goto out;
3313         }
3314
3315         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3316         register_pm_notifier(&ftrace_suspend_notifier);
3317
3318         ftrace_graph_active++;
3319         ret = start_graph_tracing();
3320         if (ret) {
3321                 ftrace_graph_active--;
3322                 goto out;
3323         }
3324
3325         ftrace_graph_return = retfunc;
3326         ftrace_graph_entry = entryfunc;
3327
3328         ftrace_startup(FTRACE_START_FUNC_RET);
3329
3330 out:
3331         mutex_unlock(&ftrace_lock);
3332         return ret;
3333 }
3334
3335 void unregister_ftrace_graph(void)
3336 {
3337         mutex_lock(&ftrace_lock);
3338
3339         if (unlikely(!ftrace_graph_active))
3340                 goto out;
3341
3342         ftrace_graph_active--;
3343         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
3344         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3345         ftrace_graph_entry = ftrace_graph_entry_stub;
3346         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3347         unregister_pm_notifier(&ftrace_suspend_notifier);
3348
3349  out:
3350         mutex_unlock(&ftrace_lock);
3351 }
3352
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		/* on allocation failure the task simply isn't graph-traced */
		if (!ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
		t->ftrace_timestamp = 0;
		/* make curr_ret_stack visible before we add the ret_stack */
		smp_wmb();
		/* publishing ret_stack is what makes the task traceable */
		t->ret_stack = ret_stack;
	}
}
3376
/* Free a task's return stack on exit, unpublishing it first */
void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
3387
/* Stop all ftrace activity (thin wrapper around ftrace_stop()) */
void ftrace_graph_stop(void)
{
	ftrace_stop();
}
3392 #endif