diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 456eced..7a86e64 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
  *             <prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
-#include <linux/spinlock.h>
 #include <linux/hash.h>
 #include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kallsyms.h>
+#include <linux/freezer.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kdebug.h>
+
+#include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
-#include <asm/kdebug.h>
+#include <asm/uaccess.h>
 
 #define KPROBE_HASH_BITS 6
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
 
+
+/*
+ * Some oddball architectures like 64-bit powerpc have function descriptors,
+ * so this must be overridable.
+ */
+#ifndef kprobe_lookup_name
+#define kprobe_lookup_name(name, addr) \
+       addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
+#endif
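+
+/*
+ * e.g. kprobe_lookup_name("vfs_read", addr) expands (in this generic,
+ * non-function-descriptor case) to:
+ *   addr = ((kprobe_opcode_t *)(kallsyms_lookup_name("vfs_read")));
+ */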
+
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-unsigned int kprobe_cpu = NR_CPUS;
-static DEFINE_SPINLOCK(kprobe_lock);
-static struct kprobe *curr_kprobe;
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobe_enabled;
+
+DEFINE_MUTEX(kprobe_mutex);            /* Protects kprobe_table */
+DEFINE_SPINLOCK(kretprobe_lock);       /* Protects kretprobe_inst_table */
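+/* The kprobe whose handler is currently executing on this CPU, if any */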
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
+/*
+ * kprobe->ainsn.insn points to the copy of the instruction to be
+ * single-stepped. x86_64, POWER4 and above have no-exec support and
+ * stepping on the instruction on a vmalloced/kmalloced/data page
+ * is a recipe for disaster
+ */
+#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
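+
+/*
+ * Worked example, assuming x86: PAGE_SIZE = 4096, MAX_INSN_SIZE = 16 and
+ * sizeof(kprobe_opcode_t) = 1, so each page holds 4096/16 = 256 slots.
+ */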
 
-/* Locks kprobe: irqs must be disabled */
-void lock_kprobes(void)
+struct kprobe_insn_page {
+       struct hlist_node hlist;
+       kprobe_opcode_t *insns;         /* Page of instruction slots */
+       char slot_used[INSNS_PER_PAGE];
+       int nused;
+       int ngarbage;
+};
+
+enum kprobe_slot_state {
+       SLOT_CLEAN = 0,
+       SLOT_DIRTY = 1,
+       SLOT_USED = 2,
+};
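+
+/* Slot life cycle: SLOT_CLEAN -> SLOT_USED -> SLOT_DIRTY (freed, awaiting GC) -> SLOT_CLEAN */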
+
+static struct hlist_head kprobe_insn_pages;
+static int kprobe_garbage_slots;
+static int collect_garbage_slots(void);
+
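+/*
+ * Verify it is safe to free instruction slots.  On preemptible kernels a
+ * task may have been preempted while single-stepping in a slot, so freeze
+ * all tasks and fail if any is still runnable; otherwise a
+ * synchronize_sched() is enough, since slots execute with preemption
+ * disabled.
+ */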
+static int __kprobes check_safety(void)
 {
-       spin_lock(&kprobe_lock);
-       kprobe_cpu = smp_processor_id();
+       int ret = 0;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+       ret = freeze_processes();
+       if (ret == 0) {
+               struct task_struct *p, *q;
+               do_each_thread(p, q) {
+                       if (p != current && p->state == TASK_RUNNING &&
+                           p->pid != 0) {
+                               printk("Check failed: %s is running\n", p->comm);
+                               ret = -1;
+                               goto loop_end;
+                       }
+               } while_each_thread(p, q);
+       }
+loop_end:
+       thaw_processes();
+#else
+       synchronize_sched();
+#endif
+       return ret;
+}
+
+/**
+ * get_insn_slot() - Find a slot on an executable page for an instruction.
+ * We allocate an executable page if there's no room on existing ones.
+ */
+kprobe_opcode_t __kprobes *get_insn_slot(void)
+{
+       struct kprobe_insn_page *kip;
+       struct hlist_node *pos;
+
+ retry:
+       hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+               if (kip->nused < INSNS_PER_PAGE) {
+                       int i;
+                       for (i = 0; i < INSNS_PER_PAGE; i++) {
+                               if (kip->slot_used[i] == SLOT_CLEAN) {
+                                       kip->slot_used[i] = SLOT_USED;
+                                       kip->nused++;
+                                       return kip->insns + (i * MAX_INSN_SIZE);
+                               }
+                       }
+                       /* Surprise!  No unused slots.  Fix kip->nused. */
+                       kip->nused = INSNS_PER_PAGE;
+               }
+       }
+
+       /* If there are any garbage slots, collect them and try again. */
+       if (kprobe_garbage_slots && collect_garbage_slots() == 0)
+               goto retry;
+       /* All out of space.  Need to allocate a new page. Use slot 0. */
+       kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+       if (!kip)
+               return NULL;
+
+       /*
+        * Use module_alloc so this page is within +/- 2GB of where the
+        * kernel image and loaded module images reside. This is required
+        * so x86_64 can correctly handle the %rip-relative fixups.
+        */
+       kip->insns = module_alloc(PAGE_SIZE);
+       if (!kip->insns) {
+               kfree(kip);
+               return NULL;
+       }
+       INIT_HLIST_NODE(&kip->hlist);
+       hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+       memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
+       kip->slot_used[0] = SLOT_USED;
+       kip->nused = 1;
+       kip->ngarbage = 0;
+       return kip->insns;
 }
 
-void unlock_kprobes(void)
+/* Return 1 if the slot's page was recycled or freed, otherwise 0. */
+static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 {
-       kprobe_cpu = NR_CPUS;
-       spin_unlock(&kprobe_lock);
+       kip->slot_used[idx] = SLOT_CLEAN;
+       kip->nused--;
+       if (kip->nused == 0) {
+               /*
+                * Page is no longer in use.  Free it unless
+                * it's the last one.  We keep the last one
+                * so as not to have to set it up again the
+                * next time somebody inserts a probe.
+                */
+               hlist_del(&kip->hlist);
+               if (hlist_empty(&kprobe_insn_pages)) {
+                       INIT_HLIST_NODE(&kip->hlist);
+                       hlist_add_head(&kip->hlist,
+                                      &kprobe_insn_pages);
+               } else {
+                       module_free(NULL, kip->insns);
+                       kfree(kip);
+               }
+               return 1;
+       }
+       return 0;
 }
 
-/* You have to be holding the kprobe_lock */
-struct kprobe *get_kprobe(void *addr)
+static int __kprobes collect_garbage_slots(void)
+{
+       struct kprobe_insn_page *kip;
+       struct hlist_node *pos, *next;
+
+       /* Ensure no task is preempted inside a garbage slot */
+       if (check_safety() != 0)
+               return -EAGAIN;
+
+       hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
+               int i;
+               if (kip->ngarbage == 0)
+                       continue;
+       kip->ngarbage = 0;      /* we will collect all garbage slots */
+               for (i = 0; i < INSNS_PER_PAGE; i++) {
+                       if (kip->slot_used[i] == SLOT_DIRTY &&
+                           collect_one_slot(kip, i))
+                               break;
+               }
+       }
+       kprobe_garbage_slots = 0;
+       return 0;
+}
+
+void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
+{
+       struct kprobe_insn_page *kip;
+       struct hlist_node *pos;
+
+       hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
+               if (kip->insns <= slot &&
+                   slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
+                       int i = (slot - kip->insns) / MAX_INSN_SIZE;
+                       if (dirty) {
+                               kip->slot_used[i] = SLOT_DIRTY;
+                               kip->ngarbage++;
+                       } else {
+                               collect_one_slot(kip, i);
+                       }
+                       break;
+               }
+       }
+
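+       /* Defer collection until more than a page's worth of slots is dirty */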
+       if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
+               collect_garbage_slots();
+}
+#endif
+
+/* We have preemption disabled, so it is safe to use the __ versions */
+static inline void set_kprobe_instance(struct kprobe *kp)
+{
+       __get_cpu_var(kprobe_instance) = kp;
+}
+
+static inline void reset_kprobe_instance(void)
+{
+       __get_cpu_var(kprobe_instance) = NULL;
+}
+
+/*
+ * This routine is called either:
+ *     - under the kprobe_mutex - during kprobe_[un]register()
+ *                             OR
+ *     - with preemption disabled - from arch/xxx/kernel/kprobes.c
+ */
+struct kprobe __kprobes *get_kprobe(void *addr)
 {
        struct hlist_head *head;
        struct hlist_node *node;
+       struct kprobe *p;
 
        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-       hlist_for_each(node, head) {
-               struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
+       hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
@@ -82,109 +285,80 @@ struct kprobe *get_kprobe(void *addr)
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
  */
-static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct kprobe *kp;
 
-       list_for_each_entry(kp, &p->list, list) {
+       list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
-                       curr_kprobe = kp;
-                       kp->pre_handler(kp, regs);
-                       curr_kprobe = NULL;
+                       set_kprobe_instance(kp);
+                       if (kp->pre_handler(kp, regs))
+                               return 1;
                }
+               reset_kprobe_instance();
        }
        return 0;
 }
 
-static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
-                             unsigned long flags)
+static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+                                       unsigned long flags)
 {
        struct kprobe *kp;
 
-       list_for_each_entry(kp, &p->list, list) {
+       list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
-                       curr_kprobe = kp;
+                       set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
-                       curr_kprobe = NULL;
+                       reset_kprobe_instance();
                }
        }
-       return;
 }
 
-static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
-                             int trapnr)
+static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
+                                       int trapnr)
 {
+       struct kprobe *cur = __get_cpu_var(kprobe_instance);
+
        /*
         * if we faulted "during" the execution of a user specified
         * probe handler, invoke just that probe's fault handler
         */
-       if (curr_kprobe && curr_kprobe->fault_handler) {
-               if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+       if (cur && cur->fault_handler) {
+               if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
 }
 
-struct kprobe trampoline_p = {
-               .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
-               .pre_handler = trampoline_probe_handler,
-               .post_handler = trampoline_post_handler
-};
-
-struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
-{
-       struct hlist_node *node;
-       struct kretprobe_instance *ri;
-       hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
-               return ri;
-       return NULL;
-}
-
-static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
+static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-       struct hlist_node *node;
-       struct kretprobe_instance *ri;
-       hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
-               return ri;
-       return NULL;
-}
-
-struct kretprobe_instance *get_rp_inst(void *sara)
-{
-       struct hlist_head *head;
-       struct hlist_node *node;
-       struct task_struct *tsk;
-       struct kretprobe_instance *ri;
+       struct kprobe *cur = __get_cpu_var(kprobe_instance);
+       int ret = 0;
 
-       tsk = arch_get_kprobe_task(sara);
-       head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
-       hlist_for_each_entry(ri, node, head, hlist) {
-               if (ri->stack_addr == sara)
-                       return ri;
+       if (cur && cur->break_handler) {
+               if (cur->break_handler(cur, regs))
+                       ret = 1;
        }
-       return NULL;
+       reset_kprobe_instance();
+       return ret;
 }
 
-void add_rp_inst(struct kretprobe_instance *ri)
+/* Walks the list and increments nmissed count for multiprobe case */
+void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
 {
-       struct task_struct *tsk;
-       /*
-        * Remove rp inst off the free list -
-        * Add it back when probed function returns
-        */
-       hlist_del(&ri->uflist);
-       tsk = arch_get_kprobe_task(ri->stack_addr);
-       /* Add rp inst onto table */
-       INIT_HLIST_NODE(&ri->hlist);
-       hlist_add_head(&ri->hlist,
-                       &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);
-
-       /* Also add this rp inst to the used list. */
-       INIT_HLIST_NODE(&ri->uflist);
-       hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+       struct kprobe *kp;
+
+       if (p->pre_handler != aggr_pre_handler) {
+               p->nmissed++;
+       } else {
+               list_for_each_entry_rcu(kp, &p->list, list)
+                       kp->nmissed++;
+       }
 }
 
-void recycle_rp_inst(struct kretprobe_instance *ri)
+/* Called with kretprobe_lock held */
+void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
+                               struct hlist_head *head)
 {
        /* remove rp inst off the rprobe_inst_table */
        hlist_del(&ri->hlist);
@@ -196,65 +370,78 @@ void recycle_rp_inst(struct kretprobe_instance *ri)
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        } else
                /* Unregistering */
-               kfree(ri);
+               hlist_add_head(&ri->hlist, head);
 }
 
-struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
+struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
 {
        return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
 }
 
-struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
+/*
+ * This function is called from finish_task_switch when task tk becomes dead,
+ * so that we can recycle any function-return probe instances associated
+ * with this task. These left over instances represent probed functions
+ * that have been called but will never return.
+ */
+void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
-       struct task_struct *tsk;
-       struct hlist_head *head;
-       struct hlist_node *node;
        struct kretprobe_instance *ri;
+       struct hlist_head *head, empty_rp;
+       struct hlist_node *node, *tmp;
+       unsigned long flags = 0;
 
-       head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
+       INIT_HLIST_HEAD(&empty_rp);
+       spin_lock_irqsave(&kretprobe_lock, flags);
+       head = kretprobe_inst_table_head(tk);
+       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+               if (ri->task == tk)
+                       recycle_rp_inst(ri, &empty_rp);
+       }
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
 
-       hlist_for_each_entry(ri, node, head, hlist) {
-               tsk = arch_get_kprobe_task(ri->stack_addr);
-               if (tsk == tk)
-                       return ri;
+       hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+               hlist_del(&ri->hlist);
+               kfree(ri);
        }
-       return NULL;
 }
 
-/*
- * This function is called from do_exit or do_execv when task tk's stack is
- * about to be recycled. Recycle any function-return probe instances
- * associated with this task. These represent probed functions that have
- * been called but may never return.
- */
-void kprobe_flush_task(struct task_struct *tk)
+static inline void free_rp_inst(struct kretprobe *rp)
 {
-       unsigned long flags = 0;
-       spin_lock_irqsave(&kprobe_lock, flags);
-       arch_kprobe_flush_task(tk);
-       spin_unlock_irqrestore(&kprobe_lock, flags);
+       struct kretprobe_instance *ri;
+       struct hlist_node *pos, *next;
+
+       hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
+               hlist_del(&ri->uflist);
+               kfree(ri);
+       }
 }
 
 /*
- * This kprobe pre_handler is registered with every kretprobe. When probe
- * hits it will set up the return probe.
+ * Keep all fields in the kprobe consistent
  */
-static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 {
-       struct kretprobe *rp = container_of(p, struct kretprobe, kp);
-
-       /*TODO: consider to only swap the RA after the last pre_handler fired */
-       arch_prepare_kretprobe(rp, regs);
-       return 0;
+       memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+       memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
 }
 
-static inline void free_rp_inst(struct kretprobe *rp)
+/*
+ * Add the new probe to old_p->list. Fail if this is the
+ * second jprobe at the address - two jprobes can't coexist.
+ */
+static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
 {
-       struct kretprobe_instance *ri;
-       while ((ri = get_free_rp_inst(rp)) != NULL) {
-               hlist_del(&ri->uflist);
-               kfree(ri);
-       }
+       if (p->break_handler) {
+               if (old_p->break_handler)
+                       return -EEXIST;
+               list_add_tail_rcu(&p->list, &old_p->list);
+               old_p->break_handler = aggr_break_handler;
+       } else
+               list_add_rcu(&p->list, &old_p->list);
+       if (p->post_handler && !old_p->post_handler)
+               old_p->post_handler = aggr_post_handler;
+       return 0;
 }
 
 /*
@@ -263,143 +450,295 @@ static inline void free_rp_inst(struct kretprobe *rp)
  */
 static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+       copy_kprobe(p, ap);
+       flush_insn_slot(ap);
        ap->addr = p->addr;
-       memcpy(&ap->opcode, &p->opcode, sizeof(kprobe_opcode_t));
-       memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));
-
        ap->pre_handler = aggr_pre_handler;
-       ap->post_handler = aggr_post_handler;
        ap->fault_handler = aggr_fault_handler;
+       if (p->post_handler)
+               ap->post_handler = aggr_post_handler;
+       if (p->break_handler)
+               ap->break_handler = aggr_break_handler;
 
        INIT_LIST_HEAD(&ap->list);
-       list_add(&p->list, &ap->list);
+       list_add_rcu(&p->list, &ap->list);
 
-       INIT_HLIST_NODE(&ap->hlist);
-       hlist_del(&p->hlist);
-       hlist_add_head(&ap->hlist,
-               &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
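+       /*
+        * Swap p for ap in the hash list under RCU so that concurrent
+        * lookups always find a registered probe at this address.
+        */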
+       hlist_replace_rcu(&p->hlist, &ap->hlist);
 }
 
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spinlock
  */
-static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
+                                         struct kprobe *p)
 {
        int ret = 0;
        struct kprobe *ap;
 
-       if (old_p->break_handler || p->break_handler) {
-               ret = -EEXIST;  /* kprobe and jprobe can't (yet) coexist */
-       } else if (old_p->pre_handler == aggr_pre_handler) {
-               list_add(&p->list, &old_p->list);
+       if (old_p->pre_handler == aggr_pre_handler) {
+               copy_kprobe(old_p, p);
+               ret = add_new_kprobe(old_p, p);
        } else {
-               ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+               ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
-               list_add(&p->list, &ap->list);
+               copy_kprobe(ap, p);
+               ret = add_new_kprobe(ap, p);
        }
        return ret;
 }
 
-/* kprobe removal house-keeping routines */
-static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
+static int __kprobes in_kprobes_functions(unsigned long addr)
 {
-       arch_disarm_kprobe(p);
-       hlist_del(&p->hlist);
-       spin_unlock_irqrestore(&kprobe_lock, flags);
-       arch_remove_kprobe(p);
-}
-
-static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
-               struct kprobe *p, unsigned long flags)
-{
-       list_del(&p->list);
-       if (list_empty(&old_p->list)) {
-               cleanup_kprobe(old_p, flags);
-               kfree(old_p);
-       } else
-               spin_unlock_irqrestore(&kprobe_lock, flags);
+       if (addr >= (unsigned long)__kprobes_text_start &&
+           addr < (unsigned long)__kprobes_text_end)
+               return -EINVAL;
+       return 0;
 }
 
-int register_kprobe(struct kprobe *p)
+static int __kprobes __register_kprobe(struct kprobe *p,
+       unsigned long called_from)
 {
        int ret = 0;
-       unsigned long flags = 0;
        struct kprobe *old_p;
+       struct module *probed_mod;
 
-       if ((ret = arch_prepare_kprobe(p)) != 0) {
-               goto rm_kprobe;
+       /*
+        * If we have a symbol_name argument look it up,
+        * and add it to the address.  That way the addr
+        * field can either be global or relative to a symbol.
+        */
+       if (p->symbol_name) {
+               if (p->addr)
+                       return -EINVAL;
+               kprobe_lookup_name(p->symbol_name, p->addr);
        }
-       spin_lock_irqsave(&kprobe_lock, flags);
-       old_p = get_kprobe(p->addr);
+
+       if (!p->addr)
+               return -EINVAL;
+       p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);
+
+       if (!kernel_text_address((unsigned long) p->addr) ||
+           in_kprobes_functions((unsigned long) p->addr))
+               return -EINVAL;
+
+       p->mod_refcounted = 0;
+
+       /*
+        * Check if we are probing a module.
+        */
+       probed_mod = module_text_address((unsigned long) p->addr);
+       if (probed_mod) {
+               struct module *calling_mod = module_text_address(called_from);
+               /*
+                * We must allow modules to probe themselves and in this case
+                * avoid incrementing the module refcount, so as to allow
+                * unloading of self probing modules.
+                */
+               if (calling_mod && calling_mod != probed_mod) {
+                       if (unlikely(!try_module_get(probed_mod)))
+                               return -EINVAL;
+                       p->mod_refcounted = 1;
+               } else
+                       probed_mod = NULL;
+       }
+
        p->nmissed = 0;
+       mutex_lock(&kprobe_mutex);
+       old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }
 
-       arch_copy_kprobe(p);
+       ret = arch_prepare_kprobe(p);
+       if (ret)
+               goto out;
+
        INIT_HLIST_NODE(&p->hlist);
-       hlist_add_head(&p->hlist,
+       hlist_add_head_rcu(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-       arch_arm_kprobe(p);
+       if (kprobe_enabled)
+               arch_arm_kprobe(p);
 
 out:
-       spin_unlock_irqrestore(&kprobe_lock, flags);
-rm_kprobe:
-       if (ret == -EEXIST)
-               arch_remove_kprobe(p);
+       mutex_unlock(&kprobe_mutex);
+
+       if (ret && probed_mod)
+               module_put(probed_mod);
        return ret;
 }
 
-void unregister_kprobe(struct kprobe *p)
+int __kprobes register_kprobe(struct kprobe *p)
 {
-       unsigned long flags;
-       struct kprobe *old_p;
+       return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
+}
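+
+/*
+ * Illustrative usage sketch (not part of this file; my_pre_handler is a
+ * hypothetical handler):
+ *
+ *     static struct kprobe kp = {
+ *             .symbol_name = "do_fork",
+ *             .pre_handler = my_pre_handler,
+ *     };
+ *     ret = register_kprobe(&kp);
+ */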
 
-       spin_lock_irqsave(&kprobe_lock, flags);
+void __kprobes unregister_kprobe(struct kprobe *p)
+{
+       struct module *mod;
+       struct kprobe *old_p, *list_p;
+       int cleanup_p;
+
+       mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
-       if (old_p) {
-               if (old_p->pre_handler == aggr_pre_handler)
-                       cleanup_aggr_kprobe(old_p, p, flags);
-               else
-                       cleanup_kprobe(p, flags);
-       } else
-               spin_unlock_irqrestore(&kprobe_lock, flags);
+       if (unlikely(!old_p)) {
+               mutex_unlock(&kprobe_mutex);
+               return;
+       }
+       if (p != old_p) {
+               list_for_each_entry_rcu(list_p, &old_p->list, list)
+                       if (list_p == p)
+                               /* kprobe p is a valid probe */
+                               goto valid_p;
+               mutex_unlock(&kprobe_mutex);
+               return;
+       }
+valid_p:
+       if (old_p == p ||
+           (old_p->pre_handler == aggr_pre_handler &&
+            p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
+               /*
+                * Only probe on the hash list. Disarm only if kprobes are
+                * enabled - otherwise, the breakpoint would already have
+                * been removed. We save on flushing icache.
+                */
+               if (kprobe_enabled)
+                       arch_disarm_kprobe(p);
+               hlist_del_rcu(&old_p->hlist);
+               cleanup_p = 1;
+       } else {
+               list_del_rcu(&p->list);
+               cleanup_p = 0;
+       }
+
+       mutex_unlock(&kprobe_mutex);
+
+       synchronize_sched();
+       if (p->mod_refcounted) {
+               mod = module_text_address((unsigned long)p->addr);
+               if (mod)
+                       module_put(mod);
+       }
+
+       if (cleanup_p) {
+               if (p != old_p) {
+                       list_del_rcu(&p->list);
+                       kfree(old_p);
+               }
+               arch_remove_kprobe(p);
+       } else {
+               mutex_lock(&kprobe_mutex);
+               if (p->break_handler)
+                       old_p->break_handler = NULL;
+               if (p->post_handler) {
+                       list_for_each_entry_rcu(list_p, &old_p->list, list) {
+                               if (list_p->post_handler) {
+                                       cleanup_p = 2;
+                                       break;
+                               }
+                       }
+                       if (cleanup_p == 0)
+                               old_p->post_handler = NULL;
+               }
+               mutex_unlock(&kprobe_mutex);
+       }
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
-       .priority = 0x7fffffff /* we need to notified first */
+       .priority = 0x7fffffff /* we need to be notified first */
 };
 
-int register_jprobe(struct jprobe *jp)
+unsigned long __weak arch_deref_entry_point(void *entry)
 {
+       return (unsigned long)entry;
+}
+
+int __kprobes register_jprobe(struct jprobe *jp)
+{
+       unsigned long addr = arch_deref_entry_point(jp->entry);
+
+       if (!kernel_text_address(addr))
+               return -EINVAL;
+
        /* Todo: Verify probepoint is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;
 
-       return register_kprobe(&jp->kp);
+       return __register_kprobe(&jp->kp,
+               (unsigned long)__builtin_return_address(0));
 }
 
-void unregister_jprobe(struct jprobe *jp)
+void __kprobes unregister_jprobe(struct jprobe *jp)
 {
        unregister_kprobe(&jp->kp);
 }
 
 #ifdef ARCH_SUPPORTS_KRETPROBES
 
-int register_kretprobe(struct kretprobe *rp)
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When probe
+ * hits it will set up the return probe.
+ */
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+                                          struct pt_regs *regs)
+{
+       struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+       unsigned long flags = 0;
+
+       /* TODO: consider swapping the RA only after the last pre_handler has fired */
+       spin_lock_irqsave(&kretprobe_lock, flags);
+       if (!hlist_empty(&rp->free_instances)) {
+               struct kretprobe_instance *ri;
+
+               ri = hlist_entry(rp->free_instances.first,
+                                struct kretprobe_instance, uflist);
+               ri->rp = rp;
+               ri->task = current;
+
+               if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+                       spin_unlock_irqrestore(&kretprobe_lock, flags);
+                       return 0;
+               }
+
+               arch_prepare_kretprobe(ri, regs);
+
+               /* XXX(hch): why is there no hlist_move_head? */
+               hlist_del(&ri->uflist);
+               hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+               hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
+       } else
+               rp->nmissed++;
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
+       return 0;
+}
+
+int __kprobes register_kretprobe(struct kretprobe *rp)
 {
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
+       void *addr = rp->kp.addr;
+
+       if (kretprobe_blacklist_size) {
+               if (addr == NULL)
+                       kprobe_lookup_name(rp->kp.symbol_name, addr);
+               addr += rp->kp.offset;
+
+               for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+                       if (kretprobe_blacklist[i].addr == addr)
+                               return -EINVAL;
+               }
+       }
 
        rp->kp.pre_handler = pre_handler_kretprobe;
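+       /* kretprobes need only the pre_handler; clear any stale handlers */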
+       rp->kp.post_handler = NULL;
+       rp->kp.fault_handler = NULL;
+       rp->kp.break_handler = NULL;
 
        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
@@ -412,7 +751,8 @@ int register_kretprobe(struct kretprobe *rp)
        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
-               inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+               inst = kmalloc(sizeof(struct kretprobe_instance) +
+                              rp->data_size, GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
@@ -423,34 +763,43 @@ int register_kretprobe(struct kretprobe *rp)
 
        rp->nmissed = 0;
        /* Establish function entry probe point */
-       if ((ret = register_kprobe(&rp->kp)) != 0)
+       if ((ret = __register_kprobe(&rp->kp,
+               (unsigned long)__builtin_return_address(0))) != 0)
                free_rp_inst(rp);
        return ret;
 }
 
 #else /* ARCH_SUPPORTS_KRETPROBES */
 
-int register_kretprobe(struct kretprobe *rp)
+int __kprobes register_kretprobe(struct kretprobe *rp)
 {
        return -ENOSYS;
 }
 
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+                                          struct pt_regs *regs)
+{
+       return 0;
+}
+
 #endif /* ARCH_SUPPORTS_KRETPROBES */
 
-void unregister_kretprobe(struct kretprobe *rp)
+void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
        unsigned long flags;
        struct kretprobe_instance *ri;
+       struct hlist_node *pos, *next;
 
        unregister_kprobe(&rp->kp);
+
        /* No race here */
-       spin_lock_irqsave(&kprobe_lock, flags);
-       free_rp_inst(rp);
-       while ((ri = get_used_rp_inst(rp)) != NULL) {
+       spin_lock_irqsave(&kretprobe_lock, flags);
+       hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
-       spin_unlock_irqrestore(&kprobe_lock, flags);
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
+       free_rp_inst(rp);
 }
 
 static int __init init_kprobes(void)
@@ -464,19 +813,260 @@ static int __init init_kprobes(void)
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
        }
 
-       err = register_die_notifier(&kprobe_exceptions_nb);
-       /* Register the trampoline probe for return probe */
-       register_kprobe(&trampoline_p);
+       if (kretprobe_blacklist_size) {
+               /* lookup the function address from its name */
+               for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
+                       kprobe_lookup_name(kretprobe_blacklist[i].name,
+                                          kretprobe_blacklist[i].addr);
+                       if (!kretprobe_blacklist[i].addr)
+                               printk("kretprobe: lookup failed: %s\n",
+                                      kretprobe_blacklist[i].name);
+               }
+       }
+
+       /* By default, kprobes are enabled */
+       kprobe_enabled = true;
+
+       err = arch_init_kprobes();
+       if (!err)
+               err = register_die_notifier(&kprobe_exceptions_nb);
+
+       if (!err)
+               init_test_probes();
        return err;
 }
 
-__initcall(init_kprobes);
+#ifdef CONFIG_DEBUG_FS
+static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
+               const char *sym, int offset, char *modname)
+{
+       char *kprobe_type;
+
+       if (p->pre_handler == pre_handler_kretprobe)
+               kprobe_type = "r";
+       else if (p->pre_handler == setjmp_pre_handler)
+               kprobe_type = "j";
+       else
+               kprobe_type = "k";
+       if (sym)
+               seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
+                       sym, offset, (modname ? modname : " "));
+       else
+               seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
+}
+
+static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
+{
+       return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
+}
+
+static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
+{
+       (*pos)++;
+       if (*pos >= KPROBE_TABLE_SIZE)
+               return NULL;
+       return pos;
+}
+
+static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
+{
+       /* Nothing to do */
+}
+
+static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct kprobe *p, *kp;
+       const char *sym = NULL;
+       unsigned int i = *(loff_t *) v;
+       unsigned long offset = 0;
+       char *modname, namebuf[128];
+
+       head = &kprobe_table[i];
+       preempt_disable();
+       hlist_for_each_entry_rcu(p, node, head, hlist) {
+               sym = kallsyms_lookup((unsigned long)p->addr, NULL,
+                                       &offset, &modname, namebuf);
+               if (p->pre_handler == aggr_pre_handler) {
+                       list_for_each_entry_rcu(kp, &p->list, list)
+                               report_probe(pi, kp, sym, offset, modname);
+               } else
+                       report_probe(pi, p, sym, offset, modname);
+       }
+       preempt_enable();
+       return 0;
+}
+
+static struct seq_operations kprobes_seq_ops = {
+       .start = kprobe_seq_start,
+       .next  = kprobe_seq_next,
+       .stop  = kprobe_seq_stop,
+       .show  = show_kprobe_addr
+};
+
+static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
+{
+       return seq_open(filp, &kprobes_seq_ops);
+}
+
+static struct file_operations debugfs_kprobes_operations = {
+       .open           = kprobes_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
+static void __kprobes enable_all_kprobes(void)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct kprobe *p;
+       unsigned int i;
+
+       mutex_lock(&kprobe_mutex);
+
+       /* If kprobes are already enabled, just return */
+       if (kprobe_enabled)
+               goto already_enabled;
+
+       for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+               head = &kprobe_table[i];
+               hlist_for_each_entry_rcu(p, node, head, hlist)
+                       arch_arm_kprobe(p);
+       }
+
+       kprobe_enabled = true;
+       printk(KERN_INFO "Kprobes globally enabled\n");
+
+already_enabled:
+       mutex_unlock(&kprobe_mutex);
+       return;
+}
+
+static void __kprobes disable_all_kprobes(void)
+{
+       struct hlist_head *head;
+       struct hlist_node *node;
+       struct kprobe *p;
+       unsigned int i;
+
+       mutex_lock(&kprobe_mutex);
+
+       /* If kprobes are already disabled, just return */
+       if (!kprobe_enabled)
+               goto already_disabled;
+
+       kprobe_enabled = false;
+       printk(KERN_INFO "Kprobes globally disabled\n");
+       for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+               head = &kprobe_table[i];
+               hlist_for_each_entry_rcu(p, node, head, hlist) {
+                       if (!arch_trampoline_kprobe(p))
+                               arch_disarm_kprobe(p);
+               }
+       }
+
+       mutex_unlock(&kprobe_mutex);
+       /* Allow all currently running kprobes to complete */
+       synchronize_sched();
+       return;
+
+already_disabled:
+       mutex_unlock(&kprobe_mutex);
+       return;
+}
+
+/*
+ * XXX: The debugfs bool file interface doesn't allow for callbacks
+ * when the bool state is switched. We can reuse that facility when
+ * available
+ */
+static ssize_t read_enabled_file_bool(struct file *file,
+              char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char buf[3];
+
+       if (kprobe_enabled)
+               buf[0] = '1';
+       else
+               buf[0] = '0';
+       buf[1] = '\n';
+       buf[2] = 0x00;
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_enabled_file_bool(struct file *file,
+              const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       char buf[32];
+       int buf_size;
+
+       buf_size = min(count, (sizeof(buf)-1));
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       switch (buf[0]) {
+       case 'y':
+       case 'Y':
+       case '1':
+               enable_all_kprobes();
+               break;
+       case 'n':
+       case 'N':
+       case '0':
+               disable_all_kprobes();
+               break;
+       }
+
+       return count;
+}
+
+static struct file_operations fops_kp = {
+       .read =         read_enabled_file_bool,
+       .write =        write_enabled_file_bool,
+};
+
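+/*
+ * Exposes /sys/kernel/debug/kprobes/{list,enabled}, assuming debugfs is
+ * mounted at /sys/kernel/debug.  Reading "list" dumps every registered
+ * probe; writing '0'/'n'/'N' or '1'/'y'/'Y' to "enabled" disarms or
+ * re-arms all probes globally.
+ */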
+static int __kprobes debugfs_kprobe_init(void)
+{
+       struct dentry *dir, *file;
+       unsigned int value = 1;
+
+       dir = debugfs_create_dir("kprobes", NULL);
+       if (!dir)
+               return -ENOMEM;
+
+       file = debugfs_create_file("list", 0444, dir, NULL,
+                               &debugfs_kprobes_operations);
+       if (!file) {
+               debugfs_remove(dir);
+               return -ENOMEM;
+       }
+
+       file = debugfs_create_file("enabled", 0600, dir,
+                                       &value, &fops_kp);
+       if (!file) {
+               debugfs_remove(dir);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+late_initcall(debugfs_kprobe_init);
+#endif /* CONFIG_DEBUG_FS */
+
+module_init(init_kprobes);
 
 EXPORT_SYMBOL_GPL(register_kprobe);
 EXPORT_SYMBOL_GPL(unregister_kprobe);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
+#ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(jprobe_return);
 EXPORT_SYMBOL_GPL(register_kretprobe);
 EXPORT_SYMBOL_GPL(unregister_kretprobe);
-
+#endif