ftrace: Update the kconfig for DYNAMIC_FTRACE
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d6073a5..d96ba22 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
-extern struct tracepoint __start___tracepoints[];
-extern struct tracepoint __stop___tracepoints[];
+extern struct tracepoint * const __start___tracepoints_ptrs[];
+extern struct tracepoint * const __stop___tracepoints_ptrs[];
 
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
 
 /*
- * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
- * builtin and module tracepoints and the hash table.
+ * Tracepoints mutex protects the builtin and module tracepoints and the hash
+ * table, as well as the local module list.
  */
 static DEFINE_MUTEX(tracepoints_mutex);
 
+#ifdef CONFIG_MODULES
+/* Local list of struct module */
+static LIST_HEAD(tracepoint_module_list);
+#endif /* CONFIG_MODULES */
+
 /*
  * Tracepoint hash table, containing the active tracepoints.
  * Protected by tracepoints_mutex.
@@ -251,9 +256,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 {
        WARN_ON(strcmp((*entry)->name, elem->name) != 0);
 
-       if (elem->regfunc && !elem->state && active)
+       if (elem->regfunc && !static_key_enabled(&elem->key) && active)
                elem->regfunc();
-       else if (elem->unregfunc && elem->state && !active)
+       else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
                elem->unregfunc();
 
        /*
@@ -264,13 +269,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
         * is used.
         */
        rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-       if (!elem->state && active) {
-               enable_jump_label(&elem->state);
-               elem->state = active;
-       } else if (elem->state && !active) {
-               disable_jump_label(&elem->state);
-               elem->state = active;
-       }
+       if (active && !static_key_enabled(&elem->key))
+               static_key_slow_inc(&elem->key);
+       else if (!active && static_key_enabled(&elem->key))
+               static_key_slow_dec(&elem->key);
 }
 
 /*
@@ -281,13 +283,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
  */
 static void disable_tracepoint(struct tracepoint *elem)
 {
-       if (elem->unregfunc && elem->state)
+       if (elem->unregfunc && static_key_enabled(&elem->key))
                elem->unregfunc();
 
-       if (elem->state) {
-               disable_jump_label(&elem->state);
-               elem->state = 0;
-       }
+       if (static_key_enabled(&elem->key))
+               static_key_slow_dec(&elem->key);
        rcu_assign_pointer(elem->funcs, NULL);
 }
 
@@ -297,37 +297,53 @@ static void disable_tracepoint(struct tracepoint *elem)
  * @end: end of the range
  *
  * Updates the probe callback corresponding to a range of tracepoints.
+ * Called with tracepoints_mutex held.
  */
-void
-tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
+static void tracepoint_update_probe_range(struct tracepoint * const *begin,
+                                         struct tracepoint * const *end)
 {
-       struct tracepoint *iter;
+       struct tracepoint * const *iter;
        struct tracepoint_entry *mark_entry;
 
        if (!begin)
                return;
 
-       mutex_lock(&tracepoints_mutex);
        for (iter = begin; iter < end; iter++) {
-               mark_entry = get_tracepoint(iter->name);
+               mark_entry = get_tracepoint((*iter)->name);
                if (mark_entry) {
-                       set_tracepoint(&mark_entry, iter,
+                       set_tracepoint(&mark_entry, *iter,
                                        !!mark_entry->refcount);
                } else {
-                       disable_tracepoint(iter);
+                       disable_tracepoint(*iter);
                }
        }
-       mutex_unlock(&tracepoints_mutex);
 }
 
+#ifdef CONFIG_MODULES
+void module_update_tracepoints(void)
+{
+       struct tp_module *tp_mod;
+
+       list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+               tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
+                       tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
+}
+#else /* CONFIG_MODULES */
+void module_update_tracepoints(void)
+{
+}
+#endif /* CONFIG_MODULES */
+
+
 /*
  * Update probes, removing the faulty probes.
+ * Called with tracepoints_mutex held.
  */
 static void tracepoint_update_probes(void)
 {
        /* Core kernel tracepoints */
-       tracepoint_update_probe_range(__start___tracepoints,
-               __stop___tracepoints);
+       tracepoint_update_probe_range(__start___tracepoints_ptrs,
+               __stop___tracepoints_ptrs);
        /* tracepoints in modules. */
        module_update_tracepoints();
 }
@@ -364,11 +380,12 @@ int tracepoint_probe_register(const char *name, void *probe, void *data)
 
        mutex_lock(&tracepoints_mutex);
        old = tracepoint_add_probe(name, probe, data);
-       mutex_unlock(&tracepoints_mutex);
-       if (IS_ERR(old))
+       if (IS_ERR(old)) {
+               mutex_unlock(&tracepoints_mutex);
                return PTR_ERR(old);
-
+       }
        tracepoint_update_probes();             /* may update entry */
+       mutex_unlock(&tracepoints_mutex);
        release_probes(old);
        return 0;
 }
@@ -407,11 +424,12 @@ int tracepoint_probe_unregister(const char *name, void *probe, void *data)
 
        mutex_lock(&tracepoints_mutex);
        old = tracepoint_remove_probe(name, probe, data);
-       mutex_unlock(&tracepoints_mutex);
-       if (IS_ERR(old))
+       if (IS_ERR(old)) {
+               mutex_unlock(&tracepoints_mutex);
                return PTR_ERR(old);
-
+       }
        tracepoint_update_probes();             /* may update entry */
+       mutex_unlock(&tracepoints_mutex);
        release_probes(old);
        return 0;
 }
@@ -494,9 +512,8 @@ void tracepoint_probe_update_all(void)
        if (!list_empty(&old_probes))
                list_replace_init(&old_probes, &release_probes);
        need_update = 0;
-       mutex_unlock(&tracepoints_mutex);
-
        tracepoint_update_probes();
+       mutex_unlock(&tracepoints_mutex);
        list_for_each_entry_safe(pos, next, &release_probes, u.list) {
                list_del(&pos->u.list);
                call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
@@ -514,8 +531,8 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
  * Will return the first tracepoint in the range if the input tracepoint is
  * NULL.
  */
-int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-       struct tracepoint *begin, struct tracepoint *end)
+static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+       struct tracepoint * const *begin, struct tracepoint * const *end)
 {
        if (!*tracepoint && begin != end) {
                *tracepoint = begin;
@@ -525,25 +542,58 @@ int tracepoint_get_iter_range(struct tracepoint **tracepoint,
                return 1;
        return 0;
 }
-EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
 
+#ifdef CONFIG_MODULES
 static void tracepoint_get_iter(struct tracepoint_iter *iter)
 {
        int found = 0;
+       struct tp_module *iter_mod;
 
        /* Core kernel tracepoints */
        if (!iter->module) {
                found = tracepoint_get_iter_range(&iter->tracepoint,
-                               __start___tracepoints, __stop___tracepoints);
+                               __start___tracepoints_ptrs,
+                               __stop___tracepoints_ptrs);
                if (found)
                        goto end;
        }
-       /* tracepoints in modules. */
-       found = module_get_iter_tracepoints(iter);
+       /* Tracepoints in modules */
+       mutex_lock(&tracepoints_mutex);
+       list_for_each_entry(iter_mod, &tracepoint_module_list, list) {
+               /*
+                * Sorted module list
+                */
+               if (iter_mod < iter->module)
+                       continue;
+               else if (iter_mod > iter->module)
+                       iter->tracepoint = NULL;
+               found = tracepoint_get_iter_range(&iter->tracepoint,
+                       iter_mod->tracepoints_ptrs,
+                       iter_mod->tracepoints_ptrs
+                               + iter_mod->num_tracepoints);
+               if (found) {
+                       iter->module = iter_mod;
+                       break;
+               }
+       }
+       mutex_unlock(&tracepoints_mutex);
 end:
        if (!found)
                tracepoint_iter_reset(iter);
 }
+#else /* CONFIG_MODULES */
+static void tracepoint_get_iter(struct tracepoint_iter *iter)
+{
+       int found = 0;
+
+       /* Core kernel tracepoints */
+       found = tracepoint_get_iter_range(&iter->tracepoint,
+                       __start___tracepoints_ptrs,
+                       __stop___tracepoints_ptrs);
+       if (!found)
+               tracepoint_iter_reset(iter);
+}
+#endif /* CONFIG_MODULES */
 
 void tracepoint_iter_start(struct tracepoint_iter *iter)
 {
@@ -570,26 +620,99 @@ EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
 
 void tracepoint_iter_reset(struct tracepoint_iter *iter)
 {
+#ifdef CONFIG_MODULES
        iter->module = NULL;
+#endif /* CONFIG_MODULES */
        iter->tracepoint = NULL;
 }
 EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
 
 #ifdef CONFIG_MODULES
+static int tracepoint_module_coming(struct module *mod)
+{
+       struct tp_module *tp_mod, *iter;
+       int ret = 0;
+
+       /*
+        * We skip modules that taint the kernel, especially those with different
+        * module headers (for forced load), to make sure we don't cause a crash.
+        * Staging and out-of-tree GPL modules are fine.
+        */
+       if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+               return 0;
+       mutex_lock(&tracepoints_mutex);
+       tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
+       if (!tp_mod) {
+               ret = -ENOMEM;
+               goto end;
+       }
+       tp_mod->num_tracepoints = mod->num_tracepoints;
+       tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
+
+       /*
+        * tracepoint_module_list is kept sorted by struct module pointer
+        * address for iteration on tracepoints from a seq_file that can release
+        * the mutex between calls.
+        */
+       list_for_each_entry_reverse(iter, &tracepoint_module_list, list) {
+               BUG_ON(iter == tp_mod); /* Should never be in the list twice */
+               if (iter < tp_mod) {
+                       /* We belong to the location right after iter. */
+                       list_add(&tp_mod->list, &iter->list);
+                       goto module_added;
+               }
+       }
+       /* We belong to the beginning of the list */
+       list_add(&tp_mod->list, &tracepoint_module_list);
+module_added:
+       tracepoint_update_probe_range(mod->tracepoints_ptrs,
+               mod->tracepoints_ptrs + mod->num_tracepoints);
+end:
+       mutex_unlock(&tracepoints_mutex);
+       return ret;
+}
+
+static int tracepoint_module_going(struct module *mod)
+{
+       struct tp_module *pos;
+
+       mutex_lock(&tracepoints_mutex);
+       tracepoint_update_probe_range(mod->tracepoints_ptrs,
+               mod->tracepoints_ptrs + mod->num_tracepoints);
+       list_for_each_entry(pos, &tracepoint_module_list, list) {
+               if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
+                       list_del(&pos->list);
+                       kfree(pos);
+                       break;
+               }
+       }
+       /*
+        * In the case of modules that were tainted at "coming", we'll simply
+        * walk through the list without finding it. We cannot use the "tainted"
+        * flag on "going", in case a module taints the kernel only after being
+        * loaded.
+        */
+       mutex_unlock(&tracepoints_mutex);
+       return 0;
+}
 
 int tracepoint_module_notify(struct notifier_block *self,
                             unsigned long val, void *data)
 {
        struct module *mod = data;
+       int ret = 0;
 
        switch (val) {
        case MODULE_STATE_COMING:
+               ret = tracepoint_module_coming(mod);
+               break;
+       case MODULE_STATE_LIVE:
+               break;
        case MODULE_STATE_GOING:
-               tracepoint_update_probe_range(mod->tracepoints,
-                       mod->tracepoints + mod->num_tracepoints);
+               ret = tracepoint_module_going(mod);
                break;
        }
-       return 0;
+       return ret;
 }
 
 struct notifier_block tracepoint_module_nb = {
@@ -602,7 +725,6 @@ static int init_tracepoints(void)
        return register_module_notifier(&tracepoint_module_nb);
 }
 __initcall(init_tracepoints);
-
 #endif /* CONFIG_MODULES */
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS