ftrace: Fix regression of :mod:module function enabling
[linux-2.6.git] kernel/trace/trace_functions.c
index 021a574..c7b0c6a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -54,14 +54,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
-       int cpu, resched;
+       int cpu;
        int pc;
 
        if (unlikely(!ftrace_function_enabled))
                return;
 
        pc = preempt_count();
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
@@ -71,7 +71,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
                trace_function(tr, ip, parent_ip, flags, pc);
 
        atomic_dec(&data->disabled);
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 }
 
 static void
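
The two hunks above drop the old ftrace_preempt_disable()/ftrace_preempt_enable() helper pair, which carried a saved resched state, in favour of the plain notrace preemption primitives. A minimal sketch of the resulting callback shape, with an invented name (my_trace_call) and the same two-argument callback signature used above:

static void my_trace_call(unsigned long ip, unsigned long parent_ip)
{
        /*
         * The _notrace variants keep the guard itself out of the trace,
         * so the callback cannot recurse back into the function tracer.
         */
        preempt_disable_notrace();

        /* ... per-cpu accounting and the actual trace_function() call ... */

        preempt_enable_notrace();
}
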
@@ -149,11 +149,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 static struct ftrace_ops trace_ops __read_mostly =
 {
        .func = function_trace_call,
+       .flags = FTRACE_OPS_FL_GLOBAL,
 };
 
 static struct ftrace_ops trace_stack_ops __read_mostly =
 {
        .func = function_stack_trace_call,
+       .flags = FTRACE_OPS_FL_GLOBAL,
 };
 
 /* Our two options */
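
Both tracer ops now carry FTRACE_OPS_FL_GLOBAL, which marks an ops as following the shared global filter hash (the one edited through set_ftrace_filter, including commands such as :mod:<module>) rather than a private per-ops hash. A sketch of an ops declared the same way; my_func and my_ops are invented names:

static void my_func(unsigned long ip, unsigned long parent_ip)
{
        /* ... */
}

static struct ftrace_ops my_ops __read_mostly =
{
        .func   = my_func,
        .flags  = FTRACE_OPS_FL_GLOBAL, /* honour the shared set_ftrace_filter hash */
};
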
@@ -193,9 +195,11 @@ static void tracing_start_function_trace(void)
 static void tracing_stop_function_trace(void)
 {
        ftrace_function_enabled = 0;
-       /* OK if they are not registered */
-       unregister_ftrace_function(&trace_stack_ops);
-       unregister_ftrace_function(&trace_ops);
+
+       if (func_flags.val & TRACE_FUNC_OPT_STACK)
+               unregister_ftrace_function(&trace_stack_ops);
+       else
+               unregister_ftrace_function(&trace_ops);
 }
 
 static int func_set_flag(u32 old_flags, u32 bit, int set)
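
tracing_stop_function_trace() now unregisters only the ops that was actually registered, keyed off the TRACE_FUNC_OPT_STACK option bit. The start path, whose body is not part of this hunk, presumably makes the mirror-image choice; a simplified sketch of that expected shape, omitting any callback selection the real start path also performs:

static void tracing_start_function_trace(void)  /* simplified sketch */
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}
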
@@ -225,6 +229,7 @@ static struct tracer function_trace __read_mostly =
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
+       .wait_pipe      = poll_wait_pipe,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
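
The tracer also gains a wait_pipe callback. poll_wait_pipe is the generic polling waiter for tracers that have no event of their own to wake a blocked trace_pipe reader, which appears to be the case for plain function tracing. A hypothetical tracer skeleton wired up the same way (all my_tracer_* names are invented):

static int my_tracer_init(struct trace_array *tr)
{
        return 0;
}

static void my_tracer_reset(struct trace_array *tr)
{
}

static struct tracer my_tracer __read_mostly =
{
        .name           = "my_tracer",
        .init           = my_tracer_init,
        .reset          = my_tracer_reset,
        .wait_pipe      = poll_wait_pipe,       /* poll: nothing else wakes trace_pipe readers */
};
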
@@ -269,36 +274,35 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
 
 static int
 ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
-                        struct ftrace_hook_ops *ops, void *data);
+                        struct ftrace_probe_ops *ops, void *data);
 
-static struct ftrace_hook_ops traceon_hook_ops = {
+static struct ftrace_probe_ops traceon_probe_ops = {
        .func                   = ftrace_traceon,
        .print                  = ftrace_trace_onoff_print,
 };
 
-static struct ftrace_hook_ops traceoff_hook_ops = {
+static struct ftrace_probe_ops traceoff_probe_ops = {
        .func                   = ftrace_traceoff,
        .print                  = ftrace_trace_onoff_print,
 };
 
 static int
 ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
-                        struct ftrace_hook_ops *ops, void *data)
+                        struct ftrace_probe_ops *ops, void *data)
 {
-       char str[KSYM_SYMBOL_LEN];
        long count = (long)data;
 
-       kallsyms_lookup(ip, NULL, NULL, NULL, str);
-       seq_printf(m, "%s:", str);
+       seq_printf(m, "%ps:", (void *)ip);
 
-       if (ops == &traceon_hook_ops)
+       if (ops == &traceon_probe_ops)
                seq_printf(m, "traceon");
        else
                seq_printf(m, "traceoff");
 
-       if (count != -1)
-               seq_printf(m, ":count=%ld", count);
-       seq_putc(m, '\n');
+       if (count == -1)
+               seq_printf(m, ":unlimited\n");
+       else
+               seq_printf(m, ":count=%ld\n", count);
 
        return 0;
 }
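
The print handler switches from an explicit kallsyms_lookup() into a stack buffer to the %ps vsprintf extension, which resolves a kernel address to its bare symbol name inside seq_printf() itself. A before/after fragment, with the variable names used in the handler above:

        char str[KSYM_SYMBOL_LEN];

        /* old: resolve the symbol by hand into a local buffer */
        kallsyms_lookup(ip, NULL, NULL, NULL, str);
        seq_printf(m, "%s:", str);

        /* new: let the printf core do the lookup */
        seq_printf(m, "%ps:", (void *)ip);
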
@@ -306,23 +310,24 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
 static int
 ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
 {
-       struct ftrace_hook_ops *ops;
+       struct ftrace_probe_ops *ops;
 
        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
-               ops = &traceon_hook_ops;
+               ops = &traceon_probe_ops;
        else
-               ops = &traceoff_hook_ops;
+               ops = &traceoff_probe_ops;
 
-       unregister_ftrace_function_hook_func(glob, ops);
+       unregister_ftrace_function_probe_func(glob, ops);
 
        return 0;
 }
 
 static int
-ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
+ftrace_trace_onoff_callback(struct ftrace_hash *hash,
+                           char *glob, char *cmd, char *param, int enable)
 {
-       struct ftrace_hook_ops *ops;
+       struct ftrace_probe_ops *ops;
        void *count = (void *)-1;
        char *number;
        int ret;
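
Two related changes land in this hunk: the ftrace_hook_* naming becomes ftrace_probe_*, and the ftrace_func_command callback gains a leading struct ftrace_hash * argument, the hash currently being edited through set_ftrace_filter. This caller ignores the hash, but the widened prototype is presumably what ties this file to the :mod: fix in the subject line, since module-matching commands need access to that hash. A sketch of a command using the new prototype; the command name and functions are invented:

static int
my_cmd_callback(struct ftrace_hash *hash, char *glob, char *cmd,
                char *param, int enable)
{
        /* glob:  pattern written before the first colon           */
        /* cmd:   the command name itself ("my_cmd")               */
        /* param: text after the second colon, may be NULL         */
        return 0;
}

static struct ftrace_func_command my_cmd = {
        .name   = "my_cmd",
        .func   = my_cmd_callback,
};

/* hooked up elsewhere with register_ftrace_command(&my_cmd) */
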
@@ -336,9 +341,9 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
 
        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
-               ops = &traceon_hook_ops;
+               ops = &traceon_probe_ops;
        else
-               ops = &traceoff_hook_ops;
+               ops = &traceoff_probe_ops;
 
        if (!param)
                goto out_reg;
@@ -357,9 +362,9 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
                return ret;
 
  out_reg:
-       ret = register_ftrace_function_hook(glob, ops, count);
+       ret = register_ftrace_function_probe(glob, ops, count);
 
-       return ret;
+       return ret < 0 ? ret : 0;
 }
 
 static struct ftrace_func_command ftrace_traceon_cmd = {
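
register_ftrace_function_probe() reports how many functions the probe was attached to, so a successful registration returns a positive count, while the command callback is expected to return zero on success. That is what the ret < 0 ? ret : 0 normalisation above does; spelled out:

        ret = register_ftrace_function_probe(glob, ops, count);
        if (ret < 0)
                return ret;     /* propagate real errors */
        return 0;               /* drop the "functions matched" count */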