tracing: adding function timings to function profiler
[linux-2.6.git] kernel/trace/ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/kprobes.h>
26 #include <linux/ftrace.h>
27 #include <linux/sysctl.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31
32 #include <trace/sched.h>
33
34 #include <asm/ftrace.h>
35
36 #include "trace_output.h"
37 #include "trace_stat.h"
38
39 #define FTRACE_WARN_ON(cond)                    \
40         do {                                    \
41                 if (WARN_ON(cond))              \
42                         ftrace_kill();          \
43         } while (0)
44
45 #define FTRACE_WARN_ON_ONCE(cond)               \
46         do {                                    \
47                 if (WARN_ON_ONCE(cond))         \
48                         ftrace_kill();          \
49         } while (0)
50
51 /* hash bits for specific function selection */
52 #define FTRACE_HASH_BITS 7
53 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
54
55 /* ftrace_enabled is a method to turn ftrace on or off */
56 int ftrace_enabled __read_mostly;
57 static int last_ftrace_enabled;
58
59 /* Quick disabling of function tracer. */
60 int function_trace_stop;
61
62 /*
63  * ftrace_disabled is set when an anomaly is discovered.
64  * ftrace_disabled is much stronger than ftrace_enabled.
65  */
66 static int ftrace_disabled __read_mostly;
67
68 static DEFINE_MUTEX(ftrace_lock);
69
70 static struct ftrace_ops ftrace_list_end __read_mostly =
71 {
72         .func = ftrace_stub,
73 };
74
75 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
76 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
77 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
78 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
79
80 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
81 {
82         struct ftrace_ops *op = ftrace_list;
83
84         /* in case someone actually ports this to alpha! */
85         read_barrier_depends();
86
87         while (op != &ftrace_list_end) {
88                 /* silly alpha */
89                 read_barrier_depends();
90                 op->func(ip, parent_ip);
91                 op = op->next;
92         }
93 }
94
95 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
96 {
97         if (!test_tsk_trace_trace(current))
98                 return;
99
100         ftrace_pid_function(ip, parent_ip);
101 }
102
103 static void set_ftrace_pid_function(ftrace_func_t func)
104 {
105         /* do not set ftrace_pid_function to itself! */
106         if (func != ftrace_pid_func)
107                 ftrace_pid_function = func;
108 }
109
110 /**
111  * clear_ftrace_function - reset the ftrace function
112  *
113  * This NULLs the ftrace function and in essence stops
114  * tracing.  There may be a lag before the update takes effect.
115  */
116 void clear_ftrace_function(void)
117 {
118         ftrace_trace_function = ftrace_stub;
119         __ftrace_trace_function = ftrace_stub;
120         ftrace_pid_function = ftrace_stub;
121 }
122
123 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
124 /*
125  * For those archs that do not test function_trace_stop in their
126  * mcount call site, we need to do it from C.
127  */
128 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
129 {
130         if (function_trace_stop)
131                 return;
132
133         __ftrace_trace_function(ip, parent_ip);
134 }
135 #endif
136
137 static int __register_ftrace_function(struct ftrace_ops *ops)
138 {
139         ops->next = ftrace_list;
140         /*
141          * We are entering ops into the ftrace_list but another
142          * CPU might be walking that list. We need to make sure
143          * the ops->next pointer is valid before another CPU sees
144          * the ops pointer included into the ftrace_list.
145          */
146         smp_wmb();
147         ftrace_list = ops;
148
149         if (ftrace_enabled) {
150                 ftrace_func_t func;
151
152                 if (ops->next == &ftrace_list_end)
153                         func = ops->func;
154                 else
155                         func = ftrace_list_func;
156
157                 if (ftrace_pid_trace) {
158                         set_ftrace_pid_function(func);
159                         func = ftrace_pid_func;
160                 }
161
162                 /*
163                  * For one func, simply call it directly.
164                  * For more than one func, call the chain.
165                  */
166 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
167                 ftrace_trace_function = func;
168 #else
169                 __ftrace_trace_function = func;
170                 ftrace_trace_function = ftrace_test_stop_func;
171 #endif
172         }
173
174         return 0;
175 }
176
177 static int __unregister_ftrace_function(struct ftrace_ops *ops)
178 {
179         struct ftrace_ops **p;
180
181         /*
182          * If we are removing the last function, then simply point
183          * to the ftrace_stub.
184          */
185         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
186                 ftrace_trace_function = ftrace_stub;
187                 ftrace_list = &ftrace_list_end;
188                 return 0;
189         }
190
191         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
192                 if (*p == ops)
193                         break;
194
195         if (*p != ops)
196                 return -1;
197
198         *p = (*p)->next;
199
200         if (ftrace_enabled) {
201                 /* If we only have one func left, then call that directly */
202                 if (ftrace_list->next == &ftrace_list_end) {
203                         ftrace_func_t func = ftrace_list->func;
204
205                         if (ftrace_pid_trace) {
206                                 set_ftrace_pid_function(func);
207                                 func = ftrace_pid_func;
208                         }
209 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
210                         ftrace_trace_function = func;
211 #else
212                         __ftrace_trace_function = func;
213 #endif
214                 }
215         }
216
217         return 0;
218 }
219
220 static void ftrace_update_pid_func(void)
221 {
222         ftrace_func_t func;
223
224         if (ftrace_trace_function == ftrace_stub)
225                 return;
226
227         func = ftrace_trace_function;
228
229         if (ftrace_pid_trace) {
230                 set_ftrace_pid_function(func);
231                 func = ftrace_pid_func;
232         } else {
233                 if (func == ftrace_pid_func)
234                         func = ftrace_pid_function;
235         }
236
237 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
238         ftrace_trace_function = func;
239 #else
240         __ftrace_trace_function = func;
241 #endif
242 }
243
244 #ifdef CONFIG_FUNCTION_PROFILER
245 struct ftrace_profile {
246         struct hlist_node               node;
247         unsigned long                   ip;
248         unsigned long                   counter;
249 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
250         unsigned long long              time;
251 #endif
252 };
253
254 struct ftrace_profile_page {
255         struct ftrace_profile_page      *next;
256         unsigned long                   index;
257         struct ftrace_profile           records[];
258 };
259
260 #define PROFILE_RECORDS_SIZE                                            \
261         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
262
263 #define PROFILES_PER_PAGE                                       \
264         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
265
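/*
 * A rough worked example of the sizing above -- a sketch only, since
 * the exact numbers depend on architecture, PAGE_SIZE and config.
 * On a 64-bit box with 4096-byte pages and FUNCTION_GRAPH_TRACER
 * enabled, struct ftrace_profile is about 40 bytes (16 for the
 * hlist_node plus 8 each for ip, counter and time) and the page
 * header is 16 bytes, so:
 *
 *   PROFILE_RECORDS_SIZE = 4096 - 16 = 4080
 *   PROFILES_PER_PAGE    = 4080 / 40 = 102 records per page
 */
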
266 /* TODO: make these percpu, to prevent cache line bouncing */
267 static struct ftrace_profile_page *profile_pages_start;
268 static struct ftrace_profile_page *profile_pages;
269
270 static struct hlist_head *ftrace_profile_hash;
271 static int ftrace_profile_bits;
272 static int ftrace_profile_enabled;
273 static DEFINE_MUTEX(ftrace_profile_lock);
274
275 static DEFINE_PER_CPU(atomic_t, ftrace_profile_disable);
276
277 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
278
279 static raw_spinlock_t ftrace_profile_rec_lock =
280         (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
281
282 static void *
283 function_stat_next(void *v, int idx)
284 {
285         struct ftrace_profile *rec = v;
286         struct ftrace_profile_page *pg;
287
288         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
289
290  again:
291         rec++;
292         if ((void *)rec >= (void *)&pg->records[pg->index]) {
293                 pg = pg->next;
294                 if (!pg)
295                         return NULL;
296                 rec = &pg->records[0];
297                 if (!rec->counter)
298                         goto again;
299         }
300
301         return rec;
302 }
303
304 static void *function_stat_start(struct tracer_stat *trace)
305 {
306         return function_stat_next(&profile_pages_start->records[0], 0);
307 }
308
309 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
310 /* with function graph, compare on total time */
311 static int function_stat_cmp(void *p1, void *p2)
312 {
313         struct ftrace_profile *a = p1;
314         struct ftrace_profile *b = p2;
315
316         if (a->time < b->time)
317                 return -1;
318         if (a->time > b->time)
319                 return 1;
320         else
321                 return 0;
322 }
323 #else
324 /* without function graph, compare against hit counts */
325 static int function_stat_cmp(void *p1, void *p2)
326 {
327         struct ftrace_profile *a = p1;
328         struct ftrace_profile *b = p2;
329
330         if (a->counter < b->counter)
331                 return -1;
332         if (a->counter > b->counter)
333                 return 1;
334         else
335                 return 0;
336 }
337 #endif
338
339 static int function_stat_headers(struct seq_file *m)
340 {
341 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
342         seq_printf(m, "  Function                               Hit    Time\n"
343                       "  --------                               ---    ----\n");
344 #else
345         seq_printf(m, "  Function                               Hit\n"
346                       "  --------                               ---\n");
347 #endif
348         return 0;
349 }
350
351 static int function_stat_show(struct seq_file *m, void *v)
352 {
353         struct ftrace_profile *rec = v;
354         char str[KSYM_SYMBOL_LEN];
355 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
356         static struct trace_seq s;
357         static DEFINE_MUTEX(mutex);
358
359         mutex_lock(&mutex);
360         trace_seq_init(&s);
361         trace_print_graph_duration(rec->time, &s);
362 #endif
363
364         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
365         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
366
367 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
368         seq_printf(m, "    ");
369         trace_print_seq(m, &s);
370         mutex_unlock(&mutex);
371 #endif
372         seq_putc(m, '\n');
373
374         return 0;
375 }
376
377 static struct tracer_stat function_stats = {
378         .name = "functions",
379         .stat_start = function_stat_start,
380         .stat_next = function_stat_next,
381         .stat_cmp = function_stat_cmp,
382         .stat_headers = function_stat_headers,
383         .stat_show = function_stat_show
384 };
385
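/*
 * For illustration only (exact spacing comes from the seq_printf
 * formats and the duration text from trace_print_graph_duration):
 * the resulting trace_stat file is expected to look roughly like
 *
 *   Function                               Hit    Time
 *   --------                               ---    ----
 *   schedule                               467    123.456 us
 *   do_IRQ                                 201     12.345 us
 */
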
386 static void ftrace_profile_reset(void)
387 {
388         struct ftrace_profile_page *pg;
389
390         pg = profile_pages = profile_pages_start;
391
392         while (pg) {
393                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
394                 pg->index = 0;
395                 pg = pg->next;
396         }
397
398         memset(ftrace_profile_hash, 0,
399                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
400 }
401
402 int ftrace_profile_pages_init(void)
403 {
404         struct ftrace_profile_page *pg;
405         int i;
406
407         /* If we already allocated, do nothing */
408         if (profile_pages)
409                 return 0;
410
411         profile_pages = (void *)get_zeroed_page(GFP_KERNEL);
412         if (!profile_pages)
413                 return -ENOMEM;
414
415         pg = profile_pages_start = profile_pages;
416
417         /* allocate 10 more pages to start */
418         for (i = 0; i < 10; i++) {
419                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
420                 /*
421                  * We only care about allocating profile_pages; if
422                  * we fail to allocate here, hopefully we will allocate
423                  * later.
424                  */
425                 if (!pg->next)
426                         break;
427                 pg = pg->next;
428         }
429
430         return 0;
431 }
432
433 static int ftrace_profile_init(void)
434 {
435         int size;
436
437         if (ftrace_profile_hash) {
438                 /* If the profile is already created, simply reset it */
439                 ftrace_profile_reset();
440                 return 0;
441         }
442
443         /*
444          * We are profiling all functions, but usually only a few thousand
445          * functions are hit. We'll make a hash of 1024 items.
446          */
447         size = FTRACE_PROFILE_HASH_SIZE;
448
449         ftrace_profile_hash =
450                 kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
451
452         if (!ftrace_profile_hash)
453                 return -ENOMEM;
454
455         size--;
456
457         for (; size; size >>= 1)
458                 ftrace_profile_bits++;
459
460         /* Preallocate a few pages */
461         if (ftrace_profile_pages_init() < 0) {
462                 kfree(ftrace_profile_hash);
463                 ftrace_profile_hash = NULL;
464                 return -ENOMEM;
465         }
466
467         return 0;
468 }
469
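/*
 * Worked example of the bit twiddling above: FTRACE_PROFILE_HASH_SIZE
 * is 1024, so size-- leaves 1023 (0x3ff); the loop then shifts it
 * right ten times before reaching zero, leaving ftrace_profile_bits
 * at 10 -- which is what hash_long() needs to generate hash keys in
 * the range 0..1023.
 */
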
470 /* interrupts must be disabled */
471 static struct ftrace_profile *ftrace_find_profiled_func(unsigned long ip)
472 {
473         struct ftrace_profile *rec;
474         struct hlist_head *hhd;
475         struct hlist_node *n;
476         unsigned long key;
477
478         key = hash_long(ip, ftrace_profile_bits);
479         hhd = &ftrace_profile_hash[key];
480
481         if (hlist_empty(hhd))
482                 return NULL;
483
484         hlist_for_each_entry_rcu(rec, n, hhd, node) {
485                 if (rec->ip == ip)
486                         return rec;
487         }
488
489         return NULL;
490 }
491
492 static void ftrace_add_profile(struct ftrace_profile *rec)
493 {
494         unsigned long key;
495
496         key = hash_long(rec->ip, ftrace_profile_bits);
497         hlist_add_head_rcu(&rec->node, &ftrace_profile_hash[key]);
498 }
499
500 /* Interrupts must be disabled when calling this */
501 static struct ftrace_profile *
502 ftrace_profile_alloc(unsigned long ip, bool alloc_safe)
503 {
504         struct ftrace_profile *rec = NULL;
505
506         /* prevent recursion */
507         if (atomic_inc_return(&__get_cpu_var(ftrace_profile_disable)) != 1)
508                 goto out;
509
510         __raw_spin_lock(&ftrace_profile_rec_lock);
511
512         /* Try to always keep another page available */
513         if (!profile_pages->next && alloc_safe)
514                 profile_pages->next = (void *)get_zeroed_page(GFP_ATOMIC);
515
516         /*
517          * Try to find the function again since another
518          * task on another CPU could have added it
519          */
520         rec = ftrace_find_profiled_func(ip);
521         if (rec)
522                 goto out_unlock;
523
524         if (profile_pages->index == PROFILES_PER_PAGE) {
525                 if (!profile_pages->next)
526                         goto out_unlock;
527                 profile_pages = profile_pages->next;
528         }
529
530         rec = &profile_pages->records[profile_pages->index++];
531         rec->ip = ip;
532         ftrace_add_profile(rec);
533
534  out_unlock:
535         __raw_spin_unlock(&ftrace_profile_rec_lock);
536  out:
537         atomic_dec(&__get_cpu_var(ftrace_profile_disable));
538
539         return rec;
540 }
541
542 /*
543  * If we are not in an interrupt or softirq, and
544  * interrupts are disabled, and preemption is not enabled
545  * (not in a spinlock), then it should be safe to allocate memory.
546  */
547 static bool ftrace_safe_to_allocate(void)
548 {
549         return !in_interrupt() && irqs_disabled() && !preempt_count();
550 }
551
552 static void
553 function_profile_call(unsigned long ip, unsigned long parent_ip)
554 {
555         struct ftrace_profile *rec;
556         unsigned long flags;
557         bool alloc_safe;
558
559         if (!ftrace_profile_enabled)
560                 return;
561
562         alloc_safe = ftrace_safe_to_allocate();
563
564         local_irq_save(flags);
565         rec = ftrace_find_profiled_func(ip);
566         if (!rec) {
567                 rec = ftrace_profile_alloc(ip, alloc_safe);
568                 if (!rec)
569                         goto out;
570         }
571
572         rec->counter++;
573  out:
574         local_irq_restore(flags);
575 }
576
577 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
578 static int profile_graph_entry(struct ftrace_graph_ent *trace)
579 {
580         function_profile_call(trace->func, 0);
581         return 1;
582 }
583
584 static void profile_graph_return(struct ftrace_graph_ret *trace)
585 {
586         unsigned long flags;
587         struct ftrace_profile *rec;
588
589         local_irq_save(flags);
590         rec = ftrace_find_profiled_func(trace->func);
591         if (rec)
592                 rec->time += trace->rettime - trace->calltime;
593         local_irq_restore(flags);
594 }
595
596 static int register_ftrace_profiler(void)
597 {
598         return register_ftrace_graph(&profile_graph_return,
599                                      &profile_graph_entry);
600 }
601
602 static void unregister_ftrace_profiler(void)
603 {
604         unregister_ftrace_graph();
605 }
606 #else
607 static struct ftrace_ops ftrace_profile_ops __read_mostly =
608 {
609         .func = function_profile_call,
610 };
611
612 static int register_ftrace_profiler(void)
613 {
614         return register_ftrace_function(&ftrace_profile_ops);
615 }
616
617 static void unregister_ftrace_profiler(void)
618 {
619         unregister_ftrace_function(&ftrace_profile_ops);
620 }
621 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
622
623 static ssize_t
624 ftrace_profile_write(struct file *filp, const char __user *ubuf,
625                      size_t cnt, loff_t *ppos)
626 {
627         unsigned long val;
628         char buf[64];
629         int ret;
630
631         if (cnt >= sizeof(buf))
632                 return -EINVAL;
633
634         if (copy_from_user(&buf, ubuf, cnt))
635                 return -EFAULT;
636
637         buf[cnt] = 0;
638
639         ret = strict_strtoul(buf, 10, &val);
640         if (ret < 0)
641                 return ret;
642
643         val = !!val;
644
645         mutex_lock(&ftrace_profile_lock);
646         if (ftrace_profile_enabled ^ val) {
647                 if (val) {
648                         ret = ftrace_profile_init();
649                         if (ret < 0) {
650                                 cnt = ret;
651                                 goto out;
652                         }
653
654                         ret = register_ftrace_profiler();
655                         if (ret < 0) {
656                                 cnt = ret;
657                                 goto out;
658                         }
659                         ftrace_profile_enabled = 1;
660                 } else {
661                         ftrace_profile_enabled = 0;
662                         unregister_ftrace_profiler();
663                 }
664         }
665  out:
666         mutex_unlock(&ftrace_profile_lock);
667
668         filp->f_pos += cnt;
669
670         return cnt;
671 }
672
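/*
 * Example usage from user space -- paths assume debugfs is mounted
 * at /sys/kernel/debug, which is site-specific:
 *
 *   # echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *   # cat /sys/kernel/debug/tracing/trace_stat/functions
 *   # echo 0 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * Writing 1 (re)initializes the profile via ftrace_profile_init() --
 * which resets any old counts -- and registers the profiler; writing
 * 0 unregisters it but leaves the collected data readable.
 */
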
673 static ssize_t
674 ftrace_profile_read(struct file *filp, char __user *ubuf,
675                      size_t cnt, loff_t *ppos)
676 {
677         char buf[64];
678         int r;
679
680         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
681         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
682 }
683
684 static const struct file_operations ftrace_profile_fops = {
685         .open           = tracing_open_generic,
686         .read           = ftrace_profile_read,
687         .write          = ftrace_profile_write,
688 };
689
690 static void ftrace_profile_debugfs(struct dentry *d_tracer)
691 {
692         struct dentry *entry;
693         int ret;
694
695         ret = register_stat_tracer(&function_stats);
696         if (ret) {
697                 pr_warning("Warning: could not register "
698                            "function stats\n");
699                 return;
700         }
701
702         entry = debugfs_create_file("function_profile_enabled", 0644,
703                                     d_tracer, NULL, &ftrace_profile_fops);
704         if (!entry)
705                 pr_warning("Could not create debugfs "
706                            "'function_profile_enabled' entry\n");
707 }
708
709 #else /* CONFIG_FUNCTION_PROFILER */
710 static void ftrace_profile_debugfs(struct dentry *d_tracer)
711 {
712 }
713 #endif /* CONFIG_FUNCTION_PROFILER */
714
715 /* set when tracing only a pid */
716 struct pid *ftrace_pid_trace;
717 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
718
719 #ifdef CONFIG_DYNAMIC_FTRACE
720
721 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
722 # error Dynamic ftrace depends on MCOUNT_RECORD
723 #endif
724
725 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
726
727 struct ftrace_func_probe {
728         struct hlist_node       node;
729         struct ftrace_probe_ops *ops;
730         unsigned long           flags;
731         unsigned long           ip;
732         void                    *data;
733         struct rcu_head         rcu;
734 };
735
736 enum {
737         FTRACE_ENABLE_CALLS             = (1 << 0),
738         FTRACE_DISABLE_CALLS            = (1 << 1),
739         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
740         FTRACE_ENABLE_MCOUNT            = (1 << 3),
741         FTRACE_DISABLE_MCOUNT           = (1 << 4),
742         FTRACE_START_FUNC_RET           = (1 << 5),
743         FTRACE_STOP_FUNC_RET            = (1 << 6),
744 };
745
746 static int ftrace_filtered;
747
748 static struct dyn_ftrace *ftrace_new_addrs;
749
750 static DEFINE_MUTEX(ftrace_regex_lock);
751
752 struct ftrace_page {
753         struct ftrace_page      *next;
754         int                     index;
755         struct dyn_ftrace       records[];
756 };
757
758 #define ENTRIES_PER_PAGE \
759   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
760
761 /* estimate from running different kernels */
762 #define NR_TO_INIT              10000
763
764 static struct ftrace_page       *ftrace_pages_start;
765 static struct ftrace_page       *ftrace_pages;
766
767 static struct dyn_ftrace *ftrace_free_records;
768
769 /*
770  * This is a double for loop. Do not use 'break' to break out of the loop,
771  * you must use a goto.
772  */
773 #define do_for_each_ftrace_rec(pg, rec)                                 \
774         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
775                 int _____i;                                             \
776                 for (_____i = 0; _____i < pg->index; _____i++) {        \
777                         rec = &pg->records[_____i];
778
779 #define while_for_each_ftrace_rec()             \
780                 }                               \
781         }
782
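/*
 * Usage sketch for the macro pair above, with a hypothetical
 * rec_matches() predicate just to show the shape (note the goto --
 * a 'break' would only exit the inner loop):
 *
 *      do_for_each_ftrace_rec(pg, rec) {
 *              if (rec->flags & FTRACE_FL_FREE)
 *                      continue;
 *              if (rec_matches(rec))
 *                      goto found;
 *      } while_for_each_ftrace_rec();
 *      return NULL;
 *   found:
 *      return rec;
 */
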
783 #ifdef CONFIG_KPROBES
784
785 static int frozen_record_count;
786
787 static inline void freeze_record(struct dyn_ftrace *rec)
788 {
789         if (!(rec->flags & FTRACE_FL_FROZEN)) {
790                 rec->flags |= FTRACE_FL_FROZEN;
791                 frozen_record_count++;
792         }
793 }
794
795 static inline void unfreeze_record(struct dyn_ftrace *rec)
796 {
797         if (rec->flags & FTRACE_FL_FROZEN) {
798                 rec->flags &= ~FTRACE_FL_FROZEN;
799                 frozen_record_count--;
800         }
801 }
802
803 static inline int record_frozen(struct dyn_ftrace *rec)
804 {
805         return rec->flags & FTRACE_FL_FROZEN;
806 }
807 #else
808 # define freeze_record(rec)                     ({ 0; })
809 # define unfreeze_record(rec)                   ({ 0; })
810 # define record_frozen(rec)                     ({ 0; })
811 #endif /* CONFIG_KPROBES */
812
813 static void ftrace_free_rec(struct dyn_ftrace *rec)
814 {
815         rec->freelist = ftrace_free_records;
816         ftrace_free_records = rec;
817         rec->flags |= FTRACE_FL_FREE;
818 }
819
820 void ftrace_release(void *start, unsigned long size)
821 {
822         struct dyn_ftrace *rec;
823         struct ftrace_page *pg;
824         unsigned long s = (unsigned long)start;
825         unsigned long e = s + size;
826
827         if (ftrace_disabled || !start)
828                 return;
829
830         mutex_lock(&ftrace_lock);
831         do_for_each_ftrace_rec(pg, rec) {
832                 if ((rec->ip >= s) && (rec->ip < e) &&
833                     !(rec->flags & FTRACE_FL_FREE))
834                         ftrace_free_rec(rec);
835         } while_for_each_ftrace_rec();
836         mutex_unlock(&ftrace_lock);
837 }
838
839 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
840 {
841         struct dyn_ftrace *rec;
842
843         /* First check for freed records */
844         if (ftrace_free_records) {
845                 rec = ftrace_free_records;
846
847                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
848                         FTRACE_WARN_ON_ONCE(1);
849                         ftrace_free_records = NULL;
850                         return NULL;
851                 }
852
853                 ftrace_free_records = rec->freelist;
854                 memset(rec, 0, sizeof(*rec));
855                 return rec;
856         }
857
858         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
859                 if (!ftrace_pages->next) {
860                         /* allocate another page */
861                         ftrace_pages->next =
862                                 (void *)get_zeroed_page(GFP_KERNEL);
863                         if (!ftrace_pages->next)
864                                 return NULL;
865                 }
866                 ftrace_pages = ftrace_pages->next;
867         }
868
869         return &ftrace_pages->records[ftrace_pages->index++];
870 }
871
872 static struct dyn_ftrace *
873 ftrace_record_ip(unsigned long ip)
874 {
875         struct dyn_ftrace *rec;
876
877         if (ftrace_disabled)
878                 return NULL;
879
880         rec = ftrace_alloc_dyn_node(ip);
881         if (!rec)
882                 return NULL;
883
884         rec->ip = ip;
885         rec->newlist = ftrace_new_addrs;
886         ftrace_new_addrs = rec;
887
888         return rec;
889 }
890
891 static void print_ip_ins(const char *fmt, unsigned char *p)
892 {
893         int i;
894
895         printk(KERN_CONT "%s", fmt);
896
897         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
898                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
899 }
900
901 static void ftrace_bug(int failed, unsigned long ip)
902 {
903         switch (failed) {
904         case -EFAULT:
905                 FTRACE_WARN_ON_ONCE(1);
906                 pr_info("ftrace faulted on modifying ");
907                 print_ip_sym(ip);
908                 break;
909         case -EINVAL:
910                 FTRACE_WARN_ON_ONCE(1);
911                 pr_info("ftrace failed to modify ");
912                 print_ip_sym(ip);
913                 print_ip_ins(" actual: ", (unsigned char *)ip);
914                 printk(KERN_CONT "\n");
915                 break;
916         case -EPERM:
917                 FTRACE_WARN_ON_ONCE(1);
918                 pr_info("ftrace faulted on writing ");
919                 print_ip_sym(ip);
920                 break;
921         default:
922                 FTRACE_WARN_ON_ONCE(1);
923                 pr_info("ftrace faulted on unknown error ");
924                 print_ip_sym(ip);
925         }
926 }
927
928
929 static int
930 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
931 {
932         unsigned long ftrace_addr;
933         unsigned long ip, fl;
934
935         ftrace_addr = (unsigned long)FTRACE_ADDR;
936
937         ip = rec->ip;
938
939         /*
940          * If this record is not to be traced and
941          * it is not enabled then do nothing.
942          *
943          * If this record is not to be traced and
944          * it is enabled then disable it.
945          *
946          */
947         if (rec->flags & FTRACE_FL_NOTRACE) {
948                 if (rec->flags & FTRACE_FL_ENABLED)
949                         rec->flags &= ~FTRACE_FL_ENABLED;
950                 else
951                         return 0;
952
953         } else if (ftrace_filtered && enable) {
954                 /*
955                  * Filtering is on:
956                  */
957
958                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
959
960                 /* Record is filtered and enabled, do nothing */
961                 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
962                         return 0;
963
964                 /* Record is not filtered or enabled, do nothing */
965                 if (!fl)
966                         return 0;
967
968                 /* Record is not filtered but enabled, disable it */
969                 if (fl == FTRACE_FL_ENABLED)
970                         rec->flags &= ~FTRACE_FL_ENABLED;
971                 else
972                 /* Otherwise record is filtered but not enabled, enable it */
973                         rec->flags |= FTRACE_FL_ENABLED;
974         } else {
975                 /* Disable or not filtered */
976
977                 if (enable) {
978                         /* if record is enabled, do nothing */
979                         if (rec->flags & FTRACE_FL_ENABLED)
980                                 return 0;
981
982                         rec->flags |= FTRACE_FL_ENABLED;
983
984                 } else {
985
986                         /* if record is not enabled, do nothing */
987                         if (!(rec->flags & FTRACE_FL_ENABLED))
988                                 return 0;
989
990                         rec->flags &= ~FTRACE_FL_ENABLED;
991                 }
992         }
993
994         if (rec->flags & FTRACE_FL_ENABLED)
995                 return ftrace_make_call(rec, ftrace_addr);
996         else
997                 return ftrace_make_nop(NULL, rec, ftrace_addr);
998 }
999
1000 static void ftrace_replace_code(int enable)
1001 {
1002         struct dyn_ftrace *rec;
1003         struct ftrace_page *pg;
1004         int failed;
1005
1006         do_for_each_ftrace_rec(pg, rec) {
1007                 /*
1008                  * Skip over free records, records that have
1009                  * failed, and records that have not been converted.
1010                  */
1011                 if (rec->flags & FTRACE_FL_FREE ||
1012                     rec->flags & FTRACE_FL_FAILED ||
1013                     !(rec->flags & FTRACE_FL_CONVERTED))
1014                         continue;
1015
1016                 /* ignore updates to this record's mcount site */
1017                 if (get_kprobe((void *)rec->ip)) {
1018                         freeze_record(rec);
1019                         continue;
1020                 } else {
1021                         unfreeze_record(rec);
1022                 }
1023
1024                 failed = __ftrace_replace_code(rec, enable);
1025                 if (failed) {
1026                         rec->flags |= FTRACE_FL_FAILED;
1027                         if ((system_state == SYSTEM_BOOTING) ||
1028                             !core_kernel_text(rec->ip)) {
1029                                 ftrace_free_rec(rec);
1030                         } else {
1031                                 ftrace_bug(failed, rec->ip);
1032                                 /* Stop processing */
1033                                 return;
1034                         }
1035                 }
1036         } while_for_each_ftrace_rec();
1037 }
1038
1039 static int
1040 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1041 {
1042         unsigned long ip;
1043         int ret;
1044
1045         ip = rec->ip;
1046
1047         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1048         if (ret) {
1049                 ftrace_bug(ret, ip);
1050                 rec->flags |= FTRACE_FL_FAILED;
1051                 return 0;
1052         }
1053         return 1;
1054 }
1055
1056 /*
1057  * archs can override this function if they must do something
1058  * before the modifying code is performed.
1059  */
1060 int __weak ftrace_arch_code_modify_prepare(void)
1061 {
1062         return 0;
1063 }
1064
1065 /*
1066  * archs can override this function if they must do something
1067  * after the modifying code is performed.
1068  */
1069 int __weak ftrace_arch_code_modify_post_process(void)
1070 {
1071         return 0;
1072 }
1073
1074 static int __ftrace_modify_code(void *data)
1075 {
1076         int *command = data;
1077
1078         if (*command & FTRACE_ENABLE_CALLS)
1079                 ftrace_replace_code(1);
1080         else if (*command & FTRACE_DISABLE_CALLS)
1081                 ftrace_replace_code(0);
1082
1083         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1084                 ftrace_update_ftrace_func(ftrace_trace_function);
1085
1086         if (*command & FTRACE_START_FUNC_RET)
1087                 ftrace_enable_ftrace_graph_caller();
1088         else if (*command & FTRACE_STOP_FUNC_RET)
1089                 ftrace_disable_ftrace_graph_caller();
1090
1091         return 0;
1092 }
1093
1094 static void ftrace_run_update_code(int command)
1095 {
1096         int ret;
1097
1098         ret = ftrace_arch_code_modify_prepare();
1099         FTRACE_WARN_ON(ret);
1100         if (ret)
1101                 return;
1102
1103         stop_machine(__ftrace_modify_code, &command, NULL);
1104
1105         ret = ftrace_arch_code_modify_post_process();
1106         FTRACE_WARN_ON(ret);
1107 }
1108
1109 static ftrace_func_t saved_ftrace_func;
1110 static int ftrace_start_up;
1111
1112 static void ftrace_startup_enable(int command)
1113 {
1114         if (saved_ftrace_func != ftrace_trace_function) {
1115                 saved_ftrace_func = ftrace_trace_function;
1116                 command |= FTRACE_UPDATE_TRACE_FUNC;
1117         }
1118
1119         if (!command || !ftrace_enabled)
1120                 return;
1121
1122         ftrace_run_update_code(command);
1123 }
1124
1125 static void ftrace_startup(int command)
1126 {
1127         if (unlikely(ftrace_disabled))
1128                 return;
1129
1130         ftrace_start_up++;
1131         command |= FTRACE_ENABLE_CALLS;
1132
1133         ftrace_startup_enable(command);
1134 }
1135
1136 static void ftrace_shutdown(int command)
1137 {
1138         if (unlikely(ftrace_disabled))
1139                 return;
1140
1141         ftrace_start_up--;
1142         if (!ftrace_start_up)
1143                 command |= FTRACE_DISABLE_CALLS;
1144
1145         if (saved_ftrace_func != ftrace_trace_function) {
1146                 saved_ftrace_func = ftrace_trace_function;
1147                 command |= FTRACE_UPDATE_TRACE_FUNC;
1148         }
1149
1150         if (!command || !ftrace_enabled)
1151                 return;
1152
1153         ftrace_run_update_code(command);
1154 }
1155
1156 static void ftrace_startup_sysctl(void)
1157 {
1158         int command = FTRACE_ENABLE_MCOUNT;
1159
1160         if (unlikely(ftrace_disabled))
1161                 return;
1162
1163         /* Force update next time */
1164         saved_ftrace_func = NULL;
1165         /* ftrace_start_up is true if we want ftrace running */
1166         if (ftrace_start_up)
1167                 command |= FTRACE_ENABLE_CALLS;
1168
1169         ftrace_run_update_code(command);
1170 }
1171
1172 static void ftrace_shutdown_sysctl(void)
1173 {
1174         int command = FTRACE_DISABLE_MCOUNT;
1175
1176         if (unlikely(ftrace_disabled))
1177                 return;
1178
1179         /* ftrace_start_up is true if ftrace is running */
1180         if (ftrace_start_up)
1181                 command |= FTRACE_DISABLE_CALLS;
1182
1183         ftrace_run_update_code(command);
1184 }
1185
1186 static cycle_t          ftrace_update_time;
1187 static unsigned long    ftrace_update_cnt;
1188 unsigned long           ftrace_update_tot_cnt;
1189
1190 static int ftrace_update_code(struct module *mod)
1191 {
1192         struct dyn_ftrace *p;
1193         cycle_t start, stop;
1194
1195         start = ftrace_now(raw_smp_processor_id());
1196         ftrace_update_cnt = 0;
1197
1198         while (ftrace_new_addrs) {
1199
1200                 /* If something went wrong, bail without enabling anything */
1201                 if (unlikely(ftrace_disabled))
1202                         return -1;
1203
1204                 p = ftrace_new_addrs;
1205                 ftrace_new_addrs = p->newlist;
1206                 p->flags = 0L;
1207
1208                 /* convert record (i.e., patch the mcount call with a NOP) */
1209                 if (ftrace_code_disable(mod, p)) {
1210                         p->flags |= FTRACE_FL_CONVERTED;
1211                         ftrace_update_cnt++;
1212                 } else
1213                         ftrace_free_rec(p);
1214         }
1215
1216         stop = ftrace_now(raw_smp_processor_id());
1217         ftrace_update_time = stop - start;
1218         ftrace_update_tot_cnt += ftrace_update_cnt;
1219
1220         return 0;
1221 }
1222
1223 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1224 {
1225         struct ftrace_page *pg;
1226         int cnt;
1227         int i;
1228
1229         /* allocate a few pages */
1230         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1231         if (!ftrace_pages_start)
1232                 return -1;
1233
1234         /*
1235          * Allocate a few more pages.
1236          *
1237          * TODO: have some parser search vmlinux before
1238          *   final linking to find all calls to ftrace.
1239          *   Then we can:
1240          *    a) know how many pages to allocate.
1241          *     and/or
1242          *    b) set up the table then.
1243          *
1244          *  The dynamic code is still necessary for
1245          *  modules.
1246          */
1247
1248         pg = ftrace_pages = ftrace_pages_start;
1249
1250         cnt = num_to_init / ENTRIES_PER_PAGE;
1251         pr_info("ftrace: allocating %ld entries in %d pages\n",
1252                 num_to_init, cnt + 1);
1253
1254         for (i = 0; i < cnt; i++) {
1255                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1256
1257                 /* If we fail, we'll try later anyway */
1258                 if (!pg->next)
1259                         break;
1260
1261                 pg = pg->next;
1262         }
1263
1264         return 0;
1265 }
1266
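/*
 * Sizing sketch for the allocation above -- illustrative numbers
 * only, since sizeof(struct dyn_ftrace) varies by arch and config.
 * With 4096-byte pages and a 16-byte dyn_ftrace, ENTRIES_PER_PAGE
 * works out to (4096 - 16) / 16 = 255, so num_to_init = 10000 gives
 * cnt = 10000 / 255 = 39 and the pr_info above reports
 * "allocating 10000 entries in 40 pages".
 */
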
1267 enum {
1268         FTRACE_ITER_FILTER      = (1 << 0),
1269         FTRACE_ITER_CONT        = (1 << 1),
1270         FTRACE_ITER_NOTRACE     = (1 << 2),
1271         FTRACE_ITER_FAILURES    = (1 << 3),
1272         FTRACE_ITER_PRINTALL    = (1 << 4),
1273         FTRACE_ITER_HASH        = (1 << 5),
1274 };
1275
1276 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1277
1278 struct ftrace_iterator {
1279         struct ftrace_page      *pg;
1280         int                     hidx;
1281         int                     idx;
1282         unsigned                flags;
1283         unsigned char           buffer[FTRACE_BUFF_MAX+1];
1284         unsigned                buffer_idx;
1285         unsigned                filtered;
1286 };
1287
1288 static void *
1289 t_hash_next(struct seq_file *m, void *v, loff_t *pos)
1290 {
1291         struct ftrace_iterator *iter = m->private;
1292         struct hlist_node *hnd = v;
1293         struct hlist_head *hhd;
1294
1295         WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
1296
1297         (*pos)++;
1298
1299  retry:
1300         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1301                 return NULL;
1302
1303         hhd = &ftrace_func_hash[iter->hidx];
1304
1305         if (hlist_empty(hhd)) {
1306                 iter->hidx++;
1307                 hnd = NULL;
1308                 goto retry;
1309         }
1310
1311         if (!hnd)
1312                 hnd = hhd->first;
1313         else {
1314                 hnd = hnd->next;
1315                 if (!hnd) {
1316                         iter->hidx++;
1317                         goto retry;
1318                 }
1319         }
1320
1321         return hnd;
1322 }
1323
1324 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1325 {
1326         struct ftrace_iterator *iter = m->private;
1327         void *p = NULL;
1328
1329         iter->flags |= FTRACE_ITER_HASH;
1330
1331         return t_hash_next(m, p, pos);
1332 }
1333
1334 static int t_hash_show(struct seq_file *m, void *v)
1335 {
1336         struct ftrace_func_probe *rec;
1337         struct hlist_node *hnd = v;
1338         char str[KSYM_SYMBOL_LEN];
1339
1340         rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1341
1342         if (rec->ops->print)
1343                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1344
1345         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1346         seq_printf(m, "%s:", str);
1347
1348         kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
1349         seq_printf(m, "%s", str);
1350
1351         if (rec->data)
1352                 seq_printf(m, ":%p", rec->data);
1353         seq_putc(m, '\n');
1354
1355         return 0;
1356 }
1357
1358 static void *
1359 t_next(struct seq_file *m, void *v, loff_t *pos)
1360 {
1361         struct ftrace_iterator *iter = m->private;
1362         struct dyn_ftrace *rec = NULL;
1363
1364         if (iter->flags & FTRACE_ITER_HASH)
1365                 return t_hash_next(m, v, pos);
1366
1367         (*pos)++;
1368
1369         if (iter->flags & FTRACE_ITER_PRINTALL)
1370                 return NULL;
1371
1372  retry:
1373         if (iter->idx >= iter->pg->index) {
1374                 if (iter->pg->next) {
1375                         iter->pg = iter->pg->next;
1376                         iter->idx = 0;
1377                         goto retry;
1378                 } else {
1379                         iter->idx = -1;
1380                 }
1381         } else {
1382                 rec = &iter->pg->records[iter->idx++];
1383                 if ((rec->flags & FTRACE_FL_FREE) ||
1384
1385                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
1386                      (rec->flags & FTRACE_FL_FAILED)) ||
1387
1388                     ((iter->flags & FTRACE_ITER_FAILURES) &&
1389                      !(rec->flags & FTRACE_FL_FAILED)) ||
1390
1391                     ((iter->flags & FTRACE_ITER_FILTER) &&
1392                      !(rec->flags & FTRACE_FL_FILTER)) ||
1393
1394                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
1395                      !(rec->flags & FTRACE_FL_NOTRACE))) {
1396                         rec = NULL;
1397                         goto retry;
1398                 }
1399         }
1400
1401         return rec;
1402 }
1403
1404 static void *t_start(struct seq_file *m, loff_t *pos)
1405 {
1406         struct ftrace_iterator *iter = m->private;
1407         void *p = NULL;
1408
1409         mutex_lock(&ftrace_lock);
1410         /*
1411          * For set_ftrace_filter reading, if we have the filter
1412          * off, we can short cut and just print out that all
1413          * functions are enabled.
1414          */
1415         if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
1416                 if (*pos > 0)
1417                         return t_hash_start(m, pos);
1418                 iter->flags |= FTRACE_ITER_PRINTALL;
1419                 (*pos)++;
1420                 return iter;
1421         }
1422
1423         if (iter->flags & FTRACE_ITER_HASH)
1424                 return t_hash_start(m, pos);
1425
1426         if (*pos > 0) {
1427                 if (iter->idx < 0)
1428                         return p;
1429                 (*pos)--;
1430                 iter->idx--;
1431         }
1432
1433         p = t_next(m, p, pos);
1434
1435         if (!p)
1436                 return t_hash_start(m, pos);
1437
1438         return p;
1439 }
1440
1441 static void t_stop(struct seq_file *m, void *p)
1442 {
1443         mutex_unlock(&ftrace_lock);
1444 }
1445
1446 static int t_show(struct seq_file *m, void *v)
1447 {
1448         struct ftrace_iterator *iter = m->private;
1449         struct dyn_ftrace *rec = v;
1450         char str[KSYM_SYMBOL_LEN];
1451
1452         if (iter->flags & FTRACE_ITER_HASH)
1453                 return t_hash_show(m, v);
1454
1455         if (iter->flags & FTRACE_ITER_PRINTALL) {
1456                 seq_printf(m, "#### all functions enabled ####\n");
1457                 return 0;
1458         }
1459
1460         if (!rec)
1461                 return 0;
1462
1463         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1464
1465         seq_printf(m, "%s\n", str);
1466
1467         return 0;
1468 }
1469
1470 static struct seq_operations show_ftrace_seq_ops = {
1471         .start = t_start,
1472         .next = t_next,
1473         .stop = t_stop,
1474         .show = t_show,
1475 };
1476
1477 static int
1478 ftrace_avail_open(struct inode *inode, struct file *file)
1479 {
1480         struct ftrace_iterator *iter;
1481         int ret;
1482
1483         if (unlikely(ftrace_disabled))
1484                 return -ENODEV;
1485
1486         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1487         if (!iter)
1488                 return -ENOMEM;
1489
1490         iter->pg = ftrace_pages_start;
1491
1492         ret = seq_open(file, &show_ftrace_seq_ops);
1493         if (!ret) {
1494                 struct seq_file *m = file->private_data;
1495
1496                 m->private = iter;
1497         } else {
1498                 kfree(iter);
1499         }
1500
1501         return ret;
1502 }
1503
1504 int ftrace_avail_release(struct inode *inode, struct file *file)
1505 {
1506         struct seq_file *m = (struct seq_file *)file->private_data;
1507         struct ftrace_iterator *iter = m->private;
1508
1509         seq_release(inode, file);
1510         kfree(iter);
1511
1512         return 0;
1513 }
1514
1515 static int
1516 ftrace_failures_open(struct inode *inode, struct file *file)
1517 {
1518         int ret;
1519         struct seq_file *m;
1520         struct ftrace_iterator *iter;
1521
1522         ret = ftrace_avail_open(inode, file);
1523         if (!ret) {
1524                 m = (struct seq_file *)file->private_data;
1525                 iter = (struct ftrace_iterator *)m->private;
1526                 iter->flags = FTRACE_ITER_FAILURES;
1527         }
1528
1529         return ret;
1530 }
1531
1532
1533 static void ftrace_filter_reset(int enable)
1534 {
1535         struct ftrace_page *pg;
1536         struct dyn_ftrace *rec;
1537         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1538
1539         mutex_lock(&ftrace_lock);
1540         if (enable)
1541                 ftrace_filtered = 0;
1542         do_for_each_ftrace_rec(pg, rec) {
1543                 if (rec->flags & FTRACE_FL_FAILED)
1544                         continue;
1545                 rec->flags &= ~type;
1546         } while_for_each_ftrace_rec();
1547         mutex_unlock(&ftrace_lock);
1548 }
1549
1550 static int
1551 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1552 {
1553         struct ftrace_iterator *iter;
1554         int ret = 0;
1555
1556         if (unlikely(ftrace_disabled))
1557                 return -ENODEV;
1558
1559         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1560         if (!iter)
1561                 return -ENOMEM;
1562
1563         mutex_lock(&ftrace_regex_lock);
1564         if ((file->f_mode & FMODE_WRITE) &&
1565             !(file->f_flags & O_APPEND))
1566                 ftrace_filter_reset(enable);
1567
1568         if (file->f_mode & FMODE_READ) {
1569                 iter->pg = ftrace_pages_start;
1570                 iter->flags = enable ? FTRACE_ITER_FILTER :
1571                         FTRACE_ITER_NOTRACE;
1572
1573                 ret = seq_open(file, &show_ftrace_seq_ops);
1574                 if (!ret) {
1575                         struct seq_file *m = file->private_data;
1576                         m->private = iter;
1577                 } else
1578                         kfree(iter);
1579         } else
1580                 file->private_data = iter;
1581         mutex_unlock(&ftrace_regex_lock);
1582
1583         return ret;
1584 }
1585
1586 static int
1587 ftrace_filter_open(struct inode *inode, struct file *file)
1588 {
1589         return ftrace_regex_open(inode, file, 1);
1590 }
1591
1592 static int
1593 ftrace_notrace_open(struct inode *inode, struct file *file)
1594 {
1595         return ftrace_regex_open(inode, file, 0);
1596 }
1597
1598 static loff_t
1599 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1600 {
1601         loff_t ret;
1602
1603         if (file->f_mode & FMODE_READ)
1604                 ret = seq_lseek(file, offset, origin);
1605         else
1606                 file->f_pos = ret = 1;
1607
1608         return ret;
1609 }
1610
1611 enum {
1612         MATCH_FULL,
1613         MATCH_FRONT_ONLY,
1614         MATCH_MIDDLE_ONLY,
1615         MATCH_END_ONLY,
1616 };
1617
1618 /*
1619  * (static function - no need for kernel doc)
1620  *
1621  * Pass in a buffer containing a glob and this function will
1622  * set search to point to the search part of the buffer and
1623  * return the type of search it is (see enum above).
1624  * This does modify buff.
1625  *
1626  * Returns enum type.
1627  *  search returns the pointer to use for comparison.
1628  *  not returns 1 if buff started with a '!'
1629  *     0 otherwise.
1630  */
1631 static int
1632 ftrace_setup_glob(char *buff, int len, char **search, int *not)
1633 {
1634         int type = MATCH_FULL;
1635         int i;
1636
1637         if (buff[0] == '!') {
1638                 *not = 1;
1639                 buff++;
1640                 len--;
1641         } else
1642                 *not = 0;
1643
1644         *search = buff;
1645
1646         for (i = 0; i < len; i++) {
1647                 if (buff[i] == '*') {
1648                         if (!i) {
1649                                 *search = buff + 1;
1650                                 type = MATCH_END_ONLY;
1651                         } else {
1652                                 if (type == MATCH_END_ONLY)
1653                                         type = MATCH_MIDDLE_ONLY;
1654                                 else
1655                                         type = MATCH_FRONT_ONLY;
1656                                 buff[i] = 0;
1657                                 break;
1658                         }
1659                 }
1660         }
1661
1662         return type;
1663 }
1664
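/*
 * Examples of how a buffer is decomposed (derived from the logic
 * above):
 *
 *   "sys_open"  ->  MATCH_FULL,        search = "sys_open",  not = 0
 *   "sys_*"     ->  MATCH_FRONT_ONLY,  search = "sys_",      not = 0
 *   "*_open"    ->  MATCH_END_ONLY,    search = "_open",     not = 0
 *   "*lock*"    ->  MATCH_MIDDLE_ONLY, search = "lock",      not = 0
 *   "!sched*"   ->  MATCH_FRONT_ONLY,  search = "sched",     not = 1
 */
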
1665 static int ftrace_match(char *str, char *regex, int len, int type)
1666 {
1667         int matched = 0;
1668         char *ptr;
1669
1670         switch (type) {
1671         case MATCH_FULL:
1672                 if (strcmp(str, regex) == 0)
1673                         matched = 1;
1674                 break;
1675         case MATCH_FRONT_ONLY:
1676                 if (strncmp(str, regex, len) == 0)
1677                         matched = 1;
1678                 break;
1679         case MATCH_MIDDLE_ONLY:
1680                 if (strstr(str, regex))
1681                         matched = 1;
1682                 break;
1683         case MATCH_END_ONLY:
1684                 ptr = strstr(str, regex);
1685                 if (ptr && (ptr[len] == 0))
1686                         matched = 1;
1687                 break;
1688         }
1689
1690         return matched;
1691 }
1692
1693 static int
1694 ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1695 {
1696         char str[KSYM_SYMBOL_LEN];
1697
1698         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1699         return ftrace_match(str, regex, len, type);
1700 }
1701
1702 static void ftrace_match_records(char *buff, int len, int enable)
1703 {
1704         unsigned int search_len;
1705         struct ftrace_page *pg;
1706         struct dyn_ftrace *rec;
1707         unsigned long flag;
1708         char *search;
1709         int type;
1710         int not;
1711
1712         flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1713         type = ftrace_setup_glob(buff, len, &search, &not);
1714
1715         search_len = strlen(search);
1716
1717         mutex_lock(&ftrace_lock);
1718         do_for_each_ftrace_rec(pg, rec) {
1719
1720                 if (rec->flags & FTRACE_FL_FAILED)
1721                         continue;
1722
1723                 if (ftrace_match_record(rec, search, search_len, type)) {
1724                         if (not)
1725                                 rec->flags &= ~flag;
1726                         else
1727                                 rec->flags |= flag;
1728                 }
1729                 /*
1730                  * Only enable filtering if we have a function that
1731                  * is filtered on.
1732                  */
1733                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1734                         ftrace_filtered = 1;
1735         } while_for_each_ftrace_rec();
1736         mutex_unlock(&ftrace_lock);
1737 }
1738
1739 static int
1740 ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1741                            char *regex, int len, int type)
1742 {
1743         char str[KSYM_SYMBOL_LEN];
1744         char *modname;
1745
1746         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1747
1748         if (!modname || strcmp(modname, mod))
1749                 return 0;
1750
1751         /* blank search means to match all funcs in the mod */
1752         if (len)
1753                 return ftrace_match(str, regex, len, type);
1754         else
1755                 return 1;
1756 }
1757
1758 static void ftrace_match_module_records(char *buff, char *mod, int enable)
1759 {
1760         unsigned search_len = 0;
1761         struct ftrace_page *pg;
1762         struct dyn_ftrace *rec;
1763         int type = MATCH_FULL;
1764         char *search = buff;
1765         unsigned long flag;
1766         int not = 0;
1767
1768         flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1769
1770         /* blank or '*' mean the same */
1771         if (strcmp(buff, "*") == 0)
1772                 buff[0] = 0;
1773
1774         /* handle the case of 'don't filter this module' */
1775         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1776                 buff[0] = 0;
1777                 not = 1;
1778         }
1779
1780         if (strlen(buff)) {
1781                 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1782                 search_len = strlen(search);
1783         }
1784
1785         mutex_lock(&ftrace_lock);
1786         do_for_each_ftrace_rec(pg, rec) {
1787
1788                 if (rec->flags & FTRACE_FL_FAILED)
1789                         continue;
1790
1791                 if (ftrace_match_module_record(rec, mod,
1792                                                search, search_len, type)) {
1793                         if (not)
1794                                 rec->flags &= ~flag;
1795                         else
1796                                 rec->flags |= flag;
1797                 }
1798                 if (enable && (rec->flags & FTRACE_FL_FILTER))
1799                         ftrace_filtered = 1;
1800
1801         } while_for_each_ftrace_rec();
1802         mutex_unlock(&ftrace_lock);
1803 }
1804
1805 /*
1806  * We register the module command as a template to show others how
1807  * to register a command as well.
1808  */
1809
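/*
 * For example, a hypothetical 'foo' command (all names below are
 * illustrative only, not part of the kernel) would be wired up the
 * same way:
 *
 *	static int ftrace_foo_callback(char *func, char *cmd,
 *				       char *param, int enable)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_func_command ftrace_foo_cmd = {
 *		.name	= "foo",
 *		.func	= ftrace_foo_callback,
 *	};
 *
 *	register_ftrace_command(&ftrace_foo_cmd);
 *
 * after which "echo '<glob>:foo:<param>' > set_ftrace_filter" would
 * invoke ftrace_foo_callback().
 */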
1810 static int
1811 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1812 {
1813         char *mod;
1814
1815         /*
1816          * cmd == 'mod' because we only registered this func
1817          * for the 'mod' ftrace_func_command.
1818          * But if you register one func with multiple commands,
1819          * you can tell which command was used by the cmd
1820          * parameter.
1821          */
1822
1823         /* we must have a module name */
1824         if (!param)
1825                 return -EINVAL;
1826
1827         mod = strsep(&param, ":");
1828         if (!strlen(mod))
1829                 return -EINVAL;
1830
1831         ftrace_match_module_records(func, mod, enable);
1832         return 0;
1833 }
1834
1835 static struct ftrace_func_command ftrace_mod_cmd = {
1836         .name                   = "mod",
1837         .func                   = ftrace_mod_callback,
1838 };
1839
1840 static int __init ftrace_mod_cmd_init(void)
1841 {
1842         return register_ftrace_command(&ftrace_mod_cmd);
1843 }
1844 device_initcall(ftrace_mod_cmd_init);
1845
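/*
 * The function probe handler: this is installed as the ftrace_ops
 * callback while any probes are registered. It looks up the calling
 * ip in ftrace_func_hash and invokes every probe attached to it.
 */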
1846 static void
1847 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
1848 {
1849         struct ftrace_func_probe *entry;
1850         struct hlist_head *hhd;
1851         struct hlist_node *n;
1852         unsigned long key;
1853         int resched;
1854
1855         key = hash_long(ip, FTRACE_HASH_BITS);
1856
1857         hhd = &ftrace_func_hash[key];
1858
1859         if (hlist_empty(hhd))
1860                 return;
1861
1862         /*
1863          * Disable preemption here to prevent an RCU grace period from
1864          * completing while we iterate; this syncs hash iteration with
1865          * the freeing of hash entries. rcu_read_lock is too dangerous here.
1866          */
1867         resched = ftrace_preempt_disable();
1868         hlist_for_each_entry_rcu(entry, n, hhd, node) {
1869                 if (entry->ip == ip)
1870                         entry->ops->func(ip, parent_ip, &entry->data);
1871         }
1872         ftrace_preempt_enable(resched);
1873 }
1874
1875 static struct ftrace_ops trace_probe_ops __read_mostly =
1876 {
1877         .func = function_trace_probe_call,
1878 };
1879
1880 static int ftrace_probe_registered;
1881
1882 static void __enable_ftrace_function_probe(void)
1883 {
1884         int i;
1885
1886         if (ftrace_probe_registered)
1887                 return;
1888
1889         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1890                 struct hlist_head *hhd = &ftrace_func_hash[i];
1891                 if (hhd->first)
1892                         break;
1893         }
1894         /* Nothing registered? */
1895         if (i == FTRACE_FUNC_HASHSIZE)
1896                 return;
1897
1898         __register_ftrace_function(&trace_probe_ops);
1899         ftrace_startup(0);
1900         ftrace_probe_registered = 1;
1901 }
1902
1903 static void __disable_ftrace_function_probe(void)
1904 {
1905         int i;
1906
1907         if (!ftrace_probe_registered)
1908                 return;
1909
1910         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1911                 struct hlist_head *hhd = &ftrace_func_hash[i];
1912                 if (hhd->first)
1913                         return;
1914         }
1915
1916         /* no more funcs left */
1917         __unregister_ftrace_function(&trace_probe_ops);
1918         ftrace_shutdown(0);
1919         ftrace_probe_registered = 0;
1920 }
1921
1922
1923 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1924 {
1925         struct ftrace_func_probe *entry =
1926                 container_of(rhp, struct ftrace_func_probe, rcu);
1927
1928         if (entry->ops->free)
1929                 entry->ops->free(&entry->data);
1930         kfree(entry);
1931 }
1932
1933
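/**
 * register_ftrace_function_probe - attach a probe to matching functions
 * @glob - glob to match functions against ('!' negation is not allowed)
 * @ops - probe ops holding the callbacks to run on a hit
 * @data - private data handed to the callbacks
 *
 * Returns the number of functions the probe was attached to, or a
 * negative errno: -EINVAL for a '!' glob, -ENOMEM if no entry could
 * be allocated.
 */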
1934 int
1935 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1936                               void *data)
1937 {
1938         struct ftrace_func_probe *entry;
1939         struct ftrace_page *pg;
1940         struct dyn_ftrace *rec;
1941         int type, len, not;
1942         unsigned long key;
1943         int count = 0;
1944         char *search;
1945
1946         type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1947         len = strlen(search);
1948
1949         /* we do not support '!' for function probes */
1950         if (WARN_ON(not))
1951                 return -EINVAL;
1952
1953         mutex_lock(&ftrace_lock);
1954         do_for_each_ftrace_rec(pg, rec) {
1955
1956                 if (rec->flags & FTRACE_FL_FAILED)
1957                         continue;
1958
1959                 if (!ftrace_match_record(rec, search, len, type))
1960                         continue;
1961
1962                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1963                 if (!entry) {
1964                         /* If we did not process any, then return error */
1965                         if (!count)
1966                                 count = -ENOMEM;
1967                         goto out_unlock;
1968                 }
1969
1970                 count++;
1971
1972                 entry->data = data;
1973
1974                 /*
1975                  * The caller might want to do something special
1976                  * for each function we find. We call the callback
1977                  * to give the caller an opportunity to do so.
1978                  */
1979                 if (ops->callback) {
1980                         if (ops->callback(rec->ip, &entry->data) < 0) {
1981                                 /* caller does not like this func */
1982                                 kfree(entry);
1983                                 continue;
1984                         }
1985                 }
1986
1987                 entry->ops = ops;
1988                 entry->ip = rec->ip;
1989
1990                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1991                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1992
1993         } while_for_each_ftrace_rec();
1994         __enable_ftrace_function_probe();
1995
1996  out_unlock:
1997         mutex_unlock(&ftrace_lock);
1998
1999         return count;
2000 }
2001
2002 enum {
2003         PROBE_TEST_FUNC         = 1,
2004         PROBE_TEST_DATA         = 2
2005 };
2006
2007 static void
2008 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2009                                   void *data, int flags)
2010 {
2011         struct ftrace_func_probe *entry;
2012         struct hlist_node *n, *tmp;
2013         char str[KSYM_SYMBOL_LEN];
2014         int type = MATCH_FULL;
2015         int i, len = 0;
2016         char *search;
2017
2018         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2019                 glob = NULL;
2020         else if (glob) {
2021                 int not;
2022
2023                 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
2024                 len = strlen(search);
2025
2026                 /* we do not support '!' for function probes */
2027                 if (WARN_ON(not))
2028                         return;
2029         }
2030
2031         mutex_lock(&ftrace_lock);
2032         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2033                 struct hlist_head *hhd = &ftrace_func_hash[i];
2034
2035                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2036
2037                         /* break up if statements for readability */
2038                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2039                                 continue;
2040
2041                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2042                                 continue;
2043
2044                         /* do this last, since it is the most expensive */
2045                         if (glob) {
2046                                 kallsyms_lookup(entry->ip, NULL, NULL,
2047                                                 NULL, str);
2048                                 if (!ftrace_match(str, glob, len, type))
2049                                         continue;
2050                         }
2051
2052                         hlist_del(&entry->node);
2053                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2054                 }
2055         }
2056         __disable_ftrace_function_probe();
2057         mutex_unlock(&ftrace_lock);
2058 }
2059
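/*
 * Three unregister flavors: match probes by glob, ops and data;
 * by glob and ops only; or by glob alone, removing every probe
 * whose function matches.
 */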
2060 void
2061 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2062                                 void *data)
2063 {
2064         __unregister_ftrace_function_probe(glob, ops, data,
2065                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2066 }
2067
2068 void
2069 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2070 {
2071         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2072 }
2073
2074 void unregister_ftrace_function_probe_all(char *glob)
2075 {
2076         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2077 }
2078
2079 static LIST_HEAD(ftrace_commands);
2080 static DEFINE_MUTEX(ftrace_cmd_mutex);
2081
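/*
 * Add a command for use in the set_ftrace_filter syntax
 * "<glob>:<command>:<param>". Command names must be unique;
 * registering a duplicate name fails with -EBUSY.
 */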
2082 int register_ftrace_command(struct ftrace_func_command *cmd)
2083 {
2084         struct ftrace_func_command *p;
2085         int ret = 0;
2086
2087         mutex_lock(&ftrace_cmd_mutex);
2088         list_for_each_entry(p, &ftrace_commands, list) {
2089                 if (strcmp(cmd->name, p->name) == 0) {
2090                         ret = -EBUSY;
2091                         goto out_unlock;
2092                 }
2093         }
2094         list_add(&cmd->list, &ftrace_commands);
2095  out_unlock:
2096         mutex_unlock(&ftrace_cmd_mutex);
2097
2098         return ret;
2099 }
2100
2101 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2102 {
2103         struct ftrace_func_command *p, *n;
2104         int ret = -ENODEV;
2105
2106         mutex_lock(&ftrace_cmd_mutex);
2107         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2108                 if (strcmp(cmd->name, p->name) == 0) {
2109                         ret = 0;
2110                         list_del_init(&p->list);
2111                         goto out_unlock;
2112                 }
2113         }
2114  out_unlock:
2115         mutex_unlock(&ftrace_cmd_mutex);
2116
2117         return ret;
2118 }
2119
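/*
 * Parse one word written to the filter files. A plain "<glob>"
 * updates the filter/notrace flags directly, while
 * "<glob>:<command>[:<param>]" is dispatched to the matching
 * entry on the ftrace_commands list.
 */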
2120 static int ftrace_process_regex(char *buff, int len, int enable)
2121 {
2122         char *func, *command, *next = buff;
2123         struct ftrace_func_command *p;
2124         int ret = -EINVAL;
2125
2126         func = strsep(&next, ":");
2127
2128         if (!next) {
2129                 ftrace_match_records(func, len, enable);
2130                 return 0;
2131         }
2132
2133         /* command found */
2134
2135         command = strsep(&next, ":");
2136
2137         mutex_lock(&ftrace_cmd_mutex);
2138         list_for_each_entry(p, &ftrace_commands, list) {
2139                 if (strcmp(p->name, command) == 0) {
2140                         ret = p->func(func, command, next, enable);
2141                         goto out_unlock;
2142                 }
2143         }
2144  out_unlock:
2145         mutex_unlock(&ftrace_cmd_mutex);
2146
2147         return ret;
2148 }
2149
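/*
 * Writes are parsed one whitespace-separated word at a time. A word
 * may span several write() calls; FTRACE_ITER_CONT marks a partially
 * read word so the next write continues filling the same buffer.
 */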
2150 static ssize_t
2151 ftrace_regex_write(struct file *file, const char __user *ubuf,
2152                    size_t cnt, loff_t *ppos, int enable)
2153 {
2154         struct ftrace_iterator *iter;
2155         char ch;
2156         size_t read = 0;
2157         ssize_t ret;
2158
2159         if (!cnt)
2160                 return 0;
2161
2162         mutex_lock(&ftrace_regex_lock);
2163
2164         if (file->f_mode & FMODE_READ) {
2165                 struct seq_file *m = file->private_data;
2166                 iter = m->private;
2167         } else
2168                 iter = file->private_data;
2169
2170         if (!*ppos) {
2171                 iter->flags &= ~FTRACE_ITER_CONT;
2172                 iter->buffer_idx = 0;
2173         }
2174
2175         ret = get_user(ch, ubuf++);
2176         if (ret)
2177                 goto out;
2178         read++;
2179         cnt--;
2180
2181         if (!(iter->flags & FTRACE_ITER_CONT)) {
2182                 /* skip white space */
2183                 while (cnt && isspace(ch)) {
2184                         ret = get_user(ch, ubuf++);
2185                         if (ret)
2186                                 goto out;
2187                         read++;
2188                         cnt--;
2189                 }
2190
2191                 if (isspace(ch)) {
2192                         *ppos += read;
2193                         ret = read;
2194                         goto out;
2195                 }
2196
2197                 iter->buffer_idx = 0;
2198         }
2199
2200         while (cnt && !isspace(ch)) {
2201                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
2202                         iter->buffer[iter->buffer_idx++] = ch;
2203                 else {
2204                         ret = -EINVAL;
2205                         goto out;
2206                 }
2207                 ret = get_user(ch, ubuf++);
2208                 if (ret)
2209                         goto out;
2210                 read++;
2211                 cnt--;
2212         }
2213
2214         if (isspace(ch)) {
2215                 iter->filtered++;
2216                 iter->buffer[iter->buffer_idx] = 0;
2217                 ret = ftrace_process_regex(iter->buffer,
2218                                            iter->buffer_idx, enable);
2219                 if (ret)
2220                         goto out;
2221                 iter->buffer_idx = 0;
2222         } else
2223                 iter->flags |= FTRACE_ITER_CONT;
2224
2225
2226         *ppos += read;
2227
2228         ret = read;
2229  out:
2230         mutex_unlock(&ftrace_regex_lock);
2231
2232         return ret;
2233 }
2234
2235 static ssize_t
2236 ftrace_filter_write(struct file *file, const char __user *ubuf,
2237                     size_t cnt, loff_t *ppos)
2238 {
2239         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2240 }
2241
2242 static ssize_t
2243 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2244                      size_t cnt, loff_t *ppos)
2245 {
2246         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2247 }
2248
2249 static void
2250 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
2251 {
2252         if (unlikely(ftrace_disabled))
2253                 return;
2254
2255         mutex_lock(&ftrace_regex_lock);
2256         if (reset)
2257                 ftrace_filter_reset(enable);
2258         if (buf)
2259                 ftrace_match_records(buf, len, enable);
2260         mutex_unlock(&ftrace_regex_lock);
2261 }
2262
2263 /**
2264  * ftrace_set_filter - set a function to filter on in ftrace
2265  * @buf - the string that holds the function filter text.
2266  * @len - the length of the string.
2267  * @reset - non zero to reset all filters before applying this filter.
2268  *
2269  * Filters denote which functions should be enabled when tracing is enabled.
2270  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2271  */
2272 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2273 {
2274         ftrace_set_regex(buf, len, reset, 1);
2275 }
2276
2277 /**
2278  * ftrace_set_notrace - set a function to not trace in ftrace
2279  * @buf - the string that holds the function notrace text.
2280  * @len - the length of the string.
2281  * @reset - non zero to reset all filters before applying this filter.
2282  *
2283  * Notrace filters denote which functions should not be traced when tracing
2284  * is enabled. If @buf is NULL and @reset is set, all notrace filters are
2285  * cleared so that every function may be traced.
2286  */
2287 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2288 {
2289         ftrace_set_regex(buf, len, reset, 0);
2290 }
2291
2292 static int
2293 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2294 {
2295         struct seq_file *m = (struct seq_file *)file->private_data;
2296         struct ftrace_iterator *iter;
2297
2298         mutex_lock(&ftrace_regex_lock);
2299         if (file->f_mode & FMODE_READ) {
2300                 iter = m->private;
2301
2302                 seq_release(inode, file);
2303         } else
2304                 iter = file->private_data;
2305
2306         if (iter->buffer_idx) {
2307                 iter->filtered++;
2308                 iter->buffer[iter->buffer_idx] = 0;
2309                 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
2310         }
2311
2312         mutex_lock(&ftrace_lock);
2313         if (ftrace_start_up && ftrace_enabled)
2314                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2315         mutex_unlock(&ftrace_lock);
2316
2317         kfree(iter);
2318         mutex_unlock(&ftrace_regex_lock);
2319         return 0;
2320 }
2321
2322 static int
2323 ftrace_filter_release(struct inode *inode, struct file *file)
2324 {
2325         return ftrace_regex_release(inode, file, 1);
2326 }
2327
2328 static int
2329 ftrace_notrace_release(struct inode *inode, struct file *file)
2330 {
2331         return ftrace_regex_release(inode, file, 0);
2332 }
2333
2334 static const struct file_operations ftrace_avail_fops = {
2335         .open = ftrace_avail_open,
2336         .read = seq_read,
2337         .llseek = seq_lseek,
2338         .release = ftrace_avail_release,
2339 };
2340
2341 static const struct file_operations ftrace_failures_fops = {
2342         .open = ftrace_failures_open,
2343         .read = seq_read,
2344         .llseek = seq_lseek,
2345         .release = ftrace_avail_release,
2346 };
2347
2348 static const struct file_operations ftrace_filter_fops = {
2349         .open = ftrace_filter_open,
2350         .read = seq_read,
2351         .write = ftrace_filter_write,
2352         .llseek = ftrace_regex_lseek,
2353         .release = ftrace_filter_release,
2354 };
2355
2356 static const struct file_operations ftrace_notrace_fops = {
2357         .open = ftrace_notrace_open,
2358         .read = seq_read,
2359         .write = ftrace_notrace_write,
2360         .llseek = ftrace_regex_lseek,
2361         .release = ftrace_notrace_release,
2362 };
2363
2364 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2365
2366 static DEFINE_MUTEX(graph_lock);
2367
2368 int ftrace_graph_count;
2369 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2370
2371 static void *
2372 g_next(struct seq_file *m, void *v, loff_t *pos)
2373 {
2374         unsigned long *array = m->private;
2375         int index = *pos;
2376
2377         (*pos)++;
2378
2379         if (index >= ftrace_graph_count)
2380                 return NULL;
2381
2382         return &array[index];
2383 }
2384
2385 static void *g_start(struct seq_file *m, loff_t *pos)
2386 {
2387         void *p = NULL;
2388
2389         mutex_lock(&graph_lock);
2390
2391         /* Nothing is set; tell g_show to print that all functions are enabled */
2392         if (!ftrace_graph_count && !*pos)
2393                 return (void *)1;
2394
2395         p = g_next(m, p, pos);
2396
2397         return p;
2398 }
2399
2400 static void g_stop(struct seq_file *m, void *p)
2401 {
2402         mutex_unlock(&graph_lock);
2403 }
2404
2405 static int g_show(struct seq_file *m, void *v)
2406 {
2407         unsigned long *ptr = v;
2408         char str[KSYM_SYMBOL_LEN];
2409
2410         if (!ptr)
2411                 return 0;
2412
2413         if (ptr == (unsigned long *)1) {
2414                 seq_printf(m, "#### all functions enabled ####\n");
2415                 return 0;
2416         }
2417
2418         kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
2419
2420         seq_printf(m, "%s\n", str);
2421
2422         return 0;
2423 }
2424
2425 static struct seq_operations ftrace_graph_seq_ops = {
2426         .start = g_start,
2427         .next = g_next,
2428         .stop = g_stop,
2429         .show = g_show,
2430 };
2431
2432 static int
2433 ftrace_graph_open(struct inode *inode, struct file *file)
2434 {
2435         int ret = 0;
2436
2437         if (unlikely(ftrace_disabled))
2438                 return -ENODEV;
2439
2440         mutex_lock(&graph_lock);
2441         if ((file->f_mode & FMODE_WRITE) &&
2442             !(file->f_flags & O_APPEND)) {
2443                 ftrace_graph_count = 0;
2444                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2445         }
2446
2447         if (file->f_mode & FMODE_READ) {
2448                 ret = seq_open(file, &ftrace_graph_seq_ops);
2449                 if (!ret) {
2450                         struct seq_file *m = file->private_data;
2451                         m->private = ftrace_graph_funcs;
2452                 }
2453         } else
2454                 file->private_data = ftrace_graph_funcs;
2455         mutex_unlock(&graph_lock);
2456
2457         return ret;
2458 }
2459
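/*
 * Resolve @buffer (a single glob; '!' is not accepted) to function
 * addresses and append any matches not already present to @array,
 * bumping *idx for each one. Returns -ENODEV if ftrace is disabled
 * and -EINVAL if nothing matched.
 */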
2460 static int
2461 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2462 {
2463         struct dyn_ftrace *rec;
2464         struct ftrace_page *pg;
2465         int search_len;
2466         int found = 0;
2467         int type, not;
2468         char *search;
2469         bool exists;
2470         int i;
2471
2472         if (ftrace_disabled)
2473                 return -ENODEV;
2474
2475         /* decode regex */
2476         type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2477         if (not)
2478                 return -EINVAL;
2479
2480         search_len = strlen(search);
2481
2482         mutex_lock(&ftrace_lock);
2483         do_for_each_ftrace_rec(pg, rec) {
2484
2485                 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2486                         break;
2487
2488                 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2489                         continue;
2490
2491                 if (ftrace_match_record(rec, search, search_len, type)) {
2492                         /* ensure it is not already in the array */
2493                         exists = false;
2494                         for (i = 0; i < *idx; i++)
2495                                 if (array[i] == rec->ip) {
2496                                         exists = true;
2497                                         break;
2498                                 }
2499                         if (!exists) {
2500                                 array[(*idx)++] = rec->ip;
2501                                 found = 1;
2502                         }
2503                 }
2504         } while_for_each_ftrace_rec();
2505
2506         mutex_unlock(&ftrace_lock);
2507
2508         return found ? 0 : -EINVAL;
2509 }
2510
2511 static ssize_t
2512 ftrace_graph_write(struct file *file, const char __user *ubuf,
2513                    size_t cnt, loff_t *ppos)
2514 {
2515         unsigned char buffer[FTRACE_BUFF_MAX+1];
2516         unsigned long *array;
2517         size_t read = 0;
2518         ssize_t ret;
2519         int index = 0;
2520         char ch;
2521
2522         if (!cnt)
2523                 return 0;
2524
2525         mutex_lock(&graph_lock);
2526
2527         if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2528                 ret = -EBUSY;
2529                 goto out;
2530         }
2531
2532         if (file->f_mode & FMODE_READ) {
2533                 struct seq_file *m = file->private_data;
2534                 array = m->private;
2535         } else
2536                 array = file->private_data;
2537
2538         ret = get_user(ch, ubuf++);
2539         if (ret)
2540                 goto out;
2541         read++;
2542         cnt--;
2543
2544         /* skip white space */
2545         while (cnt && isspace(ch)) {
2546                 ret = get_user(ch, ubuf++);
2547                 if (ret)
2548                         goto out;
2549                 read++;
2550                 cnt--;
2551         }
2552
2553         if (isspace(ch)) {
2554                 *ppos += read;
2555                 ret = read;
2556                 goto out;
2557         }
2558
2559         while (cnt && !isspace(ch)) {
2560                 if (index < FTRACE_BUFF_MAX)
2561                         buffer[index++] = ch;
2562                 else {
2563                         ret = -EINVAL;
2564                         goto out;
2565                 }
2566                 ret = get_user(ch, ubuf++);
2567                 if (ret)
2568                         goto out;
2569                 read++;
2570                 cnt--;
2571         }
2572         buffer[index] = 0;
2573
2574         /* we allow only one expression at a time */
2575         ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
2576         if (ret)
2577                 goto out;
2578
2579         *ppos += read;
2580
2581         ret = read;
2582  out:
2583         mutex_unlock(&graph_lock);
2584
2585         return ret;
2586 }
2587
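/*
 * Reads of set_graph_function go through seq_open(), which must be
 * balanced on release or the seq_file is leaked. Write-only opens
 * allocate nothing, so there is nothing to do for them.
 */
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
        if (file->f_mode & FMODE_READ)
                seq_release(inode, file);
        return 0;
}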
2588 static const struct file_operations ftrace_graph_fops = {
2589         .open = ftrace_graph_open,
2590         .read = seq_read,
2591         .write = ftrace_graph_write,
        .release = ftrace_graph_release,
2592 };
2593 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2594
2595 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2596 {
2597         struct dentry *entry;
2598
2599         entry = debugfs_create_file("available_filter_functions", 0444,
2600                                     d_tracer, NULL, &ftrace_avail_fops);
2601         if (!entry)
2602                 pr_warning("Could not create debugfs "
2603                            "'available_filter_functions' entry\n");
2604
2605         entry = debugfs_create_file("failures", 0444,
2606                                     d_tracer, NULL, &ftrace_failures_fops);
2607         if (!entry)
2608                 pr_warning("Could not create debugfs 'failures' entry\n");
2609
2610         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2611                                     NULL, &ftrace_filter_fops);
2612         if (!entry)
2613                 pr_warning("Could not create debugfs "
2614                            "'set_ftrace_filter' entry\n");
2615
2616         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2617                                     NULL, &ftrace_notrace_fops);
2618         if (!entry)
2619                 pr_warning("Could not create debugfs "
2620                            "'set_ftrace_notrace' entry\n");
2621
2622 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2623         entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
2624                                     NULL,
2625                                     &ftrace_graph_fops);
2626         if (!entry)
2627                 pr_warning("Could not create debugfs "
2628                            "'set_graph_function' entry\n");
2629 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2630
2631         return 0;
2632 }
2633
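/*
 * Walk the table of mcount call sites between @start and @end,
 * record each one, and then convert the recorded calls to nops.
 * @mod is NULL for the core kernel.
 */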
2634 static int ftrace_convert_nops(struct module *mod,
2635                                unsigned long *start,
2636                                unsigned long *end)
2637 {
2638         unsigned long *p;
2639         unsigned long addr;
2640         unsigned long flags;
2641
2642         mutex_lock(&ftrace_lock);
2643         p = start;
2644         while (p < end) {
2645                 addr = ftrace_call_adjust(*p++);
2646                 /*
2647                  * Some architecture linkers will pad between
2648                  * the different mcount_loc sections of different
2649                  * object files to satisfy alignments.
2650                  * Skip any NULL pointers.
2651                  */
2652                 if (!addr)
2653                         continue;
2654                 ftrace_record_ip(addr);
2655         }
2656
2657         /* disable interrupts to prevent kstop machine */
2658         local_irq_save(flags);
2659         ftrace_update_code(mod);
2660         local_irq_restore(flags);
2661         mutex_unlock(&ftrace_lock);
2662
2663         return 0;
2664 }
2665
2666 void ftrace_init_module(struct module *mod,
2667                         unsigned long *start, unsigned long *end)
2668 {
2669         if (ftrace_disabled || start == end)
2670                 return;
2671         ftrace_convert_nops(mod, start, end);
2672 }
2673
2674 extern unsigned long __start_mcount_loc[];
2675 extern unsigned long __stop_mcount_loc[];
2676
2677 void __init ftrace_init(void)
2678 {
2679         unsigned long count, addr, flags;
2680         int ret;
2681
2682         /* Keep the ftrace pointer to the stub */
2683         addr = (unsigned long)ftrace_stub;
2684
2685         local_irq_save(flags);
2686         ftrace_dyn_arch_init(&addr);
2687         local_irq_restore(flags);
2688
2689         /* ftrace_dyn_arch_init places the return code in addr */
2690         if (addr)
2691                 goto failed;
2692
2693         count = __stop_mcount_loc - __start_mcount_loc;
2694
2695         ret = ftrace_dyn_table_alloc(count);
2696         if (ret)
2697                 goto failed;
2698
2699         last_ftrace_enabled = ftrace_enabled = 1;
2700
2701         ret = ftrace_convert_nops(NULL,
2702                                   __start_mcount_loc,
2703                                   __stop_mcount_loc);
2704
2705         return;
2706  failed:
2707         ftrace_disabled = 1;
2708 }
2709
2710 #else
2711
2712 static int __init ftrace_nodyn_init(void)
2713 {
2714         ftrace_enabled = 1;
2715         return 0;
2716 }
2717 device_initcall(ftrace_nodyn_init);
2718
2719 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2720 static inline void ftrace_startup_enable(int command) { }
2721 /* Keep as macros so we do not need to define the commands */
2722 # define ftrace_startup(command)        do { } while (0)
2723 # define ftrace_shutdown(command)       do { } while (0)
2724 # define ftrace_startup_sysctl()        do { } while (0)
2725 # define ftrace_shutdown_sysctl()       do { } while (0)
2726 #endif /* CONFIG_DYNAMIC_FTRACE */
2727
2728 static ssize_t
2729 ftrace_pid_read(struct file *file, char __user *ubuf,
2730                        size_t cnt, loff_t *ppos)
2731 {
2732         char buf[64];
2733         int r;
2734
2735         if (ftrace_pid_trace == ftrace_swapper_pid)
2736                 r = sprintf(buf, "swapper tasks\n");
2737         else if (ftrace_pid_trace)
2738                 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2739         else
2740                 r = sprintf(buf, "no pid\n");
2741
2742         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2743 }
2744
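/*
 * The swapper (pid 0) is really one idle task per cpu, so selecting
 * or deselecting it for tracing means walking every online cpu.
 */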
2745 static void clear_ftrace_swapper(void)
2746 {
2747         struct task_struct *p;
2748         int cpu;
2749
2750         get_online_cpus();
2751         for_each_online_cpu(cpu) {
2752                 p = idle_task(cpu);
2753                 clear_tsk_trace_trace(p);
2754         }
2755         put_online_cpus();
2756 }
2757
2758 static void set_ftrace_swapper(void)
2759 {
2760         struct task_struct *p;
2761         int cpu;
2762
2763         get_online_cpus();
2764         for_each_online_cpu(cpu) {
2765                 p = idle_task(cpu);
2766                 set_tsk_trace_trace(p);
2767         }
2768         put_online_cpus();
2769 }
2770
2771 static void clear_ftrace_pid(struct pid *pid)
2772 {
2773         struct task_struct *p;
2774
2775         rcu_read_lock();
2776         do_each_pid_task(pid, PIDTYPE_PID, p) {
2777                 clear_tsk_trace_trace(p);
2778         } while_each_pid_task(pid, PIDTYPE_PID, p);
2779         rcu_read_unlock();
2780
2781         put_pid(pid);
2782 }
2783
2784 static void set_ftrace_pid(struct pid *pid)
2785 {
2786         struct task_struct *p;
2787
2788         rcu_read_lock();
2789         do_each_pid_task(pid, PIDTYPE_PID, p) {
2790                 set_tsk_trace_trace(p);
2791         } while_each_pid_task(pid, PIDTYPE_PID, p);
2792         rcu_read_unlock();
2793 }
2794
2795 static void clear_ftrace_pid_task(struct pid **pid)
2796 {
2797         if (*pid == ftrace_swapper_pid)
2798                 clear_ftrace_swapper();
2799         else
2800                 clear_ftrace_pid(*pid);
2801
2802         *pid = NULL;
2803 }
2804
2805 static void set_ftrace_pid_task(struct pid *pid)
2806 {
2807         if (pid == ftrace_swapper_pid)
2808                 set_ftrace_swapper();
2809         else
2810                 set_ftrace_pid(pid);
2811 }
2812
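/*
 * Writing a pid to set_ftrace_pid limits function tracing to that
 * task: a negative value disables pid filtering, 0 selects the
 * per-cpu swapper/idle tasks, and any other value selects that pid.
 */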
2813 static ssize_t
2814 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2815                    size_t cnt, loff_t *ppos)
2816 {
2817         struct pid *pid;
2818         char buf[64];
2819         long val;
2820         int ret;
2821
2822         if (cnt >= sizeof(buf))
2823                 return -EINVAL;
2824
2825         if (copy_from_user(&buf, ubuf, cnt))
2826                 return -EFAULT;
2827
2828         buf[cnt] = 0;
2829
2830         ret = strict_strtol(buf, 10, &val);
2831         if (ret < 0)
2832                 return ret;
2833
2834         mutex_lock(&ftrace_lock);
2835         if (val < 0) {
2836                 /* disable pid tracing */
2837                 if (!ftrace_pid_trace)
2838                         goto out;
2839
2840                 clear_ftrace_pid_task(&ftrace_pid_trace);
2841
2842         } else {
2843                 /* swapper task is special */
2844                 if (!val) {
2845                         pid = ftrace_swapper_pid;
2846                         if (pid == ftrace_pid_trace)
2847                                 goto out;
2848                 } else {
2849                         pid = find_get_pid(val);
2850
2851                         if (pid == ftrace_pid_trace) {
2852                                 put_pid(pid);
2853                                 goto out;
2854                         }
2855                 }
2856
2857                 if (ftrace_pid_trace)
2858                         clear_ftrace_pid_task(&ftrace_pid_trace);
2859
2860                 if (!pid)
2861                         goto out;
2862
2863                 ftrace_pid_trace = pid;
2864
2865                 set_ftrace_pid_task(ftrace_pid_trace);
2866         }
2867
2868         /* update the function call */
2869         ftrace_update_pid_func();
2870         ftrace_startup_enable(0);
2871
2872  out:
2873         mutex_unlock(&ftrace_lock);
2874
2875         return cnt;
2876 }
2877
2878 static const struct file_operations ftrace_pid_fops = {
2879         .read = ftrace_pid_read,
2880         .write = ftrace_pid_write,
2881 };
2882
2883 static __init int ftrace_init_debugfs(void)
2884 {
2885         struct dentry *d_tracer;
2886         struct dentry *entry;
2887
2888         d_tracer = tracing_init_dentry();
2889         if (!d_tracer)
2890                 return 0;
2891
2892         ftrace_init_dyn_debugfs(d_tracer);
2893
2894         entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2895                                     NULL, &ftrace_pid_fops);
2896         if (!entry)
2897                 pr_warning("Could not create debugfs "
2898                            "'set_ftrace_pid' entry\n");
2899
2900         ftrace_profile_debugfs(d_tracer);
2901
2902         return 0;
2903 }
2904 fs_initcall(ftrace_init_debugfs);
2905
2906 /**
2907  * ftrace_kill - kill ftrace
2908  *
2909  * This function should be used by panic code. It stops ftrace
2910  * but in a not so nice way: no locks are taken and nothing is shut
2911  * down cleanly, which makes it safe to call from atomic context.
2912  */
2913 void ftrace_kill(void)
2914 {
2915         ftrace_disabled = 1;
2916         ftrace_enabled = 0;
2917         clear_ftrace_function();
2918 }
2919
2920 /**
2921  * register_ftrace_function - register a function for profiling
2922  * @ops - ops structure that holds the function for profiling.
2923  *
2924  * Register a function to be called by all functions in the
2925  * kernel.
2926  *
2927  * Note: @ops->func and all the functions it calls must be labeled
2928  *       with "notrace", otherwise it will go into a
2929  *       recursive loop.
2930  */
2931 int register_ftrace_function(struct ftrace_ops *ops)
2932 {
2933         int ret;
2934
2935         if (unlikely(ftrace_disabled))
2936                 return -1;
2937
2938         mutex_lock(&ftrace_lock);
2939
2940         ret = __register_ftrace_function(ops);
2941         ftrace_startup(0);
2942
2943         mutex_unlock(&ftrace_lock);
2944         return ret;
2945 }
2946
2947 /**
2948  * unregister_ftrace_function - unregister a function for profiling.
2949  * @ops - ops structure that holds the function to unregister
2950  *
2951  * Unregister a function that was added to be called by ftrace profiling.
2952  */
2953 int unregister_ftrace_function(struct ftrace_ops *ops)
2954 {
2955         int ret;
2956
2957         mutex_lock(&ftrace_lock);
2958         ret = __unregister_ftrace_function(ops);
2959         ftrace_shutdown(0);
2960         mutex_unlock(&ftrace_lock);
2961
2962         return ret;
2963 }
2964
2965 int
2966 ftrace_enable_sysctl(struct ctl_table *table, int write,
2967                      struct file *file, void __user *buffer, size_t *lenp,
2968                      loff_t *ppos)
2969 {
2970         int ret;
2971
2972         if (unlikely(ftrace_disabled))
2973                 return -ENODEV;
2974
2975         mutex_lock(&ftrace_lock);
2976
2977         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
2978
2979         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
2980                 goto out;
2981
2982         last_ftrace_enabled = ftrace_enabled;
2983
2984         if (ftrace_enabled) {
2985
2986                 ftrace_startup_sysctl();
2987
2988                 /* we are starting ftrace again */
2989                 if (ftrace_list != &ftrace_list_end) {
2990                         if (ftrace_list->next == &ftrace_list_end)
2991                                 ftrace_trace_function = ftrace_list->func;
2992                         else
2993                                 ftrace_trace_function = ftrace_list_func;
2994                 }
2995
2996         } else {
2997                 /* stopping ftrace calls (just send to ftrace_stub) */
2998                 ftrace_trace_function = ftrace_stub;
2999
3000                 ftrace_shutdown_sysctl();
3001         }
3002
3003  out:
3004         mutex_unlock(&ftrace_lock);
3005         return ret;
3006 }
3007
3008 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3009
3010 static atomic_t ftrace_graph_active;
3011 static struct notifier_block ftrace_suspend_notifier;
3012
3013 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3014 {
3015         return 0;
3016 }
3017
3018 /* The callbacks that hook a function */
3019 trace_func_graph_ret_t ftrace_graph_return =
3020                         (trace_func_graph_ret_t)ftrace_stub;
3021 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3022
3023 /* Try to assign a return stack to FTRACE_RETSTACK_ALLOC_SIZE tasks at a time. */
3024 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3025 {
3026         int i;
3027         int ret = 0;
3028         unsigned long flags;
3029         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3030         struct task_struct *g, *t;
3031
3032         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3033                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3034                                         * sizeof(struct ftrace_ret_stack),
3035                                         GFP_KERNEL);
3036                 if (!ret_stack_list[i]) {
3037                         start = 0;
3038                         end = i;
3039                         ret = -ENOMEM;
3040                         goto free;
3041                 }
3042         }
3043
3044         read_lock_irqsave(&tasklist_lock, flags);
3045         do_each_thread(g, t) {
3046                 if (start == end) {
3047                         ret = -EAGAIN;
3048                         goto unlock;
3049                 }
3050
3051                 if (t->ret_stack == NULL) {
3052                         t->curr_ret_stack = -1;
3053                         /* Make sure IRQs see the -1 first: */
3054                         barrier();
3055                         t->ret_stack = ret_stack_list[start++];
3056                         atomic_set(&t->tracing_graph_pause, 0);
3057                         atomic_set(&t->trace_overrun, 0);
3058                 }
3059         } while_each_thread(g, t);
3060
3061 unlock:
3062         read_unlock_irqrestore(&tasklist_lock, flags);
3063 free:
3064         for (i = start; i < end; i++)
3065                 kfree(ret_stack_list[i]);
3066         return ret;
3067 }
3068
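/*
 * sched_switch probe: when the sleep-time option is off, function
 * durations should not include time spent sleeping. Stamp @prev as
 * it switches out, and push the calltime of everything still on
 * @next's return stack forward by the time it was asleep.
 */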
3069 static void
3070 ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
3071                                 struct task_struct *next)
3072 {
3073         unsigned long long timestamp;
3074         int index;
3075
3076         /*
3077          * Does the user want to count the time a function was asleep?
3078          * If so, do not update the timestamps.
3079          */
3080         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3081                 return;
3082
3083         timestamp = trace_clock_local();
3084
3085         prev->ftrace_timestamp = timestamp;
3086
3087         /* only process tasks that we timestamped */
3088         if (!next->ftrace_timestamp)
3089                 return;
3090
3091         /*
3092          * Update all the counters in next to make up for the
3093          * time next was sleeping.
3094          */
3095         timestamp -= next->ftrace_timestamp;
3096
3097         for (index = next->curr_ret_stack; index >= 0; index--)
3098                 next->ret_stack[index].calltime += timestamp;
3099 }
3100
3101 /* Allocate a return stack for each task */
3102 static int start_graph_tracing(void)
3103 {
3104         struct ftrace_ret_stack **ret_stack_list;
3105         int ret, cpu;
3106
3107         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3108                                 sizeof(struct ftrace_ret_stack *),
3109                                 GFP_KERNEL);
3110
3111         if (!ret_stack_list)
3112                 return -ENOMEM;
3113
3114         /* The ret_stack of each cpu's idle task will never be freed */
3115         for_each_online_cpu(cpu)
3116                 ftrace_graph_init_task(idle_task(cpu));
3117
3118         do {
3119                 ret = alloc_retstack_tasklist(ret_stack_list);
3120         } while (ret == -EAGAIN);
3121
3122         if (!ret) {
3123                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
3124                 if (ret)
3125                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3126                                 " probe to kernel_sched_switch\n");
3127         }
3128
3129         kfree(ret_stack_list);
3130         return ret;
3131 }
3132
3133 /*
3134  * Hibernation protection.
3135  * The state of the current task is too unstable during
3136  * suspend/restore to disk. We want to protect against that.
3137  */
3138 static int
3139 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3140                                                         void *unused)
3141 {
3142         switch (state) {
3143         case PM_HIBERNATION_PREPARE:
3144                 pause_graph_tracing();
3145                 break;
3146
3147         case PM_POST_HIBERNATION:
3148                 unpause_graph_tracing();
3149                 break;
3150         }
3151         return NOTIFY_DONE;
3152 }
3153
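/**
 * register_ftrace_graph - enable the function graph tracer
 * @retfunc - callback run when a traced function returns
 * @entryfunc - callback run when a traced function is entered
 *
 * Only one graph tracer may be registered at a time; a second
 * caller gets -EBUSY. Returns 0 on success or a negative errno.
 */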
3154 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3155                         trace_func_graph_ent_t entryfunc)
3156 {
3157         int ret = 0;
3158
3159         mutex_lock(&ftrace_lock);
3160
3161         /* we currently allow only one tracer registered at a time */
3162         if (atomic_read(&ftrace_graph_active)) {
3163                 ret = -EBUSY;
3164                 goto out;
3165         }
3166
3167         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3168         register_pm_notifier(&ftrace_suspend_notifier);
3169
3170         atomic_inc(&ftrace_graph_active);
3171         ret = start_graph_tracing();
3172         if (ret) {
3173                 atomic_dec(&ftrace_graph_active);
3174                 goto out;
3175         }
3176
3177         ftrace_graph_return = retfunc;
3178         ftrace_graph_entry = entryfunc;
3179
3180         ftrace_startup(FTRACE_START_FUNC_RET);
3181
3182 out:
3183         mutex_unlock(&ftrace_lock);
3184         return ret;
3185 }
3186
3187 void unregister_ftrace_graph(void)
3188 {
3189         mutex_lock(&ftrace_lock);
3190
3191         atomic_dec(&ftrace_graph_active);
3192         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
3193         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3194         ftrace_graph_entry = ftrace_graph_entry_stub;
3195         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3196         unregister_pm_notifier(&ftrace_suspend_notifier);
3197
3198         mutex_unlock(&ftrace_lock);
3199 }
3200
3201 /* Allocate a return stack for a newly created task */
3202 void ftrace_graph_init_task(struct task_struct *t)
3203 {
3204         if (atomic_read(&ftrace_graph_active)) {
3205                 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3206                                 * sizeof(struct ftrace_ret_stack),
3207                                 GFP_KERNEL);
3208                 if (!t->ret_stack)
3209                         return;
3210                 t->curr_ret_stack = -1;
3211                 atomic_set(&t->tracing_graph_pause, 0);
3212                 atomic_set(&t->trace_overrun, 0);
3213                 t->ftrace_timestamp = 0;
3214         } else
3215                 t->ret_stack = NULL;
3216 }
3217
3218 void ftrace_graph_exit_task(struct task_struct *t)
3219 {
3220         struct ftrace_ret_stack *ret_stack = t->ret_stack;
3221
3222         t->ret_stack = NULL;
3223         /* NULL must become visible to IRQs before we free it: */
3224         barrier();
3225
3226         kfree(ret_stack);
3227 }
3228
3229 void ftrace_graph_stop(void)
3230 {
3231         ftrace_stop();
3232 }
3233 #endif
3234