diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ba4d4c0740cff601aa445e15e638674cc495f0a0..c62b8546cc90e0b39de0e824b0f7ed7179d8ba92 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -36,7 +36,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/stddef.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/moduleloader.h>
 #include <linux/kallsyms.h>
 #include <linux/freezer.h>
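The <linux/module.h> include is replaced by the lighter <linux/export.h> as part of the tree-wide module.h split: this spot only needs the EXPORT_SYMBOL*() machinery, and the module helpers used later in the file (try_module_get() and friends) are still reachable indirectly through <linux/moduleloader.h>. A minimal sketch of what export.h alone covers (example_exported_helper is a made-up name):

    #include <linux/export.h>

    int example_exported_helper(void)
    {
            return 0;
    }
    EXPORT_SYMBOL_GPL(example_exported_helper);
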
@@ -78,10 +78,10 @@ static bool kprobes_all_disarmed;
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-       spinlock_t lock ____cacheline_aligned_in_smp;
+       raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
        return &(kretprobe_table_locks[hash].lock);
 }
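The kretprobe_table_locks are taken from the probe-hit and return-trampoline paths, which must never sleep, so they move from spinlock_t to raw_spinlock_t: on PREEMPT_RT a plain spinlock_t becomes a sleeping lock, while raw_spinlock_t keeps the classic spinning, IRQ-disabling behaviour. The pattern in isolation (example_lock and example_update are illustrative names only):

    static DEFINE_RAW_SPINLOCK(example_lock);

    static void example_update(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            /* ... touch data shared with kprobe/kretprobe handlers ... */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }
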
@@ -317,12 +317,12 @@ void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
-       __get_cpu_var(kprobe_instance) = kp;
+       __this_cpu_write(kprobe_instance, kp);
 }
 
 static inline void reset_kprobe_instance(void)
 {
-       __get_cpu_var(kprobe_instance) = NULL;
+       __this_cpu_write(kprobe_instance, NULL);
 }
 
 /*
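The open-coded __get_cpu_var() assignments become __this_cpu_write(), and the readers further down become __this_cpu_read(). These accessors let architectures use single-instruction per-cpu operations instead of first computing the per-cpu address; the double-underscore variants are fine here because, as the comment above notes, preemption is already disabled. The accessor pair on its own (example_instance is an illustrative name):

    static DEFINE_PER_CPU(struct kprobe *, example_instance);

    static void example_set(struct kprobe *kp)
    {
            __this_cpu_write(example_instance, kp);         /* preemption must be off */
    }

    static struct kprobe *example_get(void)
    {
            return __this_cpu_read(example_instance);
    }
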
@@ -480,8 +480,6 @@ static DECLARE_COMPLETION(optimizer_comp);
  */
 static __kprobes void do_optimize_kprobes(void)
 {
-       struct optimized_kprobe *op, *tmp;
-
        /* Optimization never be done when disarmed */
        if (kprobes_all_disarmed || !kprobes_allow_optimization ||
            list_empty(&optimizing_list))
@@ -499,12 +497,7 @@ static __kprobes void do_optimize_kprobes(void)
         */
        get_online_cpus();
        mutex_lock(&text_mutex);
-       list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
-               WARN_ON(kprobe_disabled(&op->kp));
-               if (arch_optimize_kprobe(op) < 0)
-                       op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
-               list_del_init(&op->list);
-       }
+       arch_optimize_kprobes(&optimizing_list);
        mutex_unlock(&text_mutex);
        put_online_cpus();
 }
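Optimizing probes one by one pays the expensive code-patching synchronization once per probe. Handing the whole optimizing_list to arch_optimize_kprobes() lets the architecture patch the batch in one go (the x86 implementation batches the jump insertions rather than poking one site at a time). A hedged sketch of the contract, mirroring the loop removed above; optimize_one() is a hypothetical stand-in for the actual text patching:

    void __kprobes arch_optimize_kprobes(struct list_head *oplist)
    {
            struct optimized_kprobe *op, *tmp;

            list_for_each_entry_safe(op, tmp, oplist, list) {
                    WARN_ON(kprobe_disabled(&op->kp));
                    if (optimize_one(op) < 0)               /* hypothetical helper */
                            op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
                    list_del_init(&op->list);               /* drained from oplist */
            }
    }
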
@@ -524,9 +517,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
        /* Ditto to do_optimize_kprobes */
        get_online_cpus();
        mutex_lock(&text_mutex);
-       list_for_each_entry_safe(op, tmp, &unoptimizing_list, list) {
-               /* Unoptimize kprobes */
-               arch_unoptimize_kprobe(op);
+       arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+       /* Loop free_list for disarming */
+       list_for_each_entry_safe(op, tmp, free_list, list) {
                /* Disarm probes if marked disabled */
                if (kprobe_disabled(&op->kp))
                        arch_disarm_kprobe(&op->kp);
@@ -537,8 +530,6 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
                         * (reclaiming is done by do_free_cleaned_kprobes.)
                         */
                        hlist_del_rcu(&op->kp.hlist);
-                       /* Move only unused probes on free_list */
-                       list_move(&op->list, free_list);
                } else
                        list_del_init(&op->list);
        }
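Unoptimization is batched the same way: arch_unoptimize_kprobes() restores the original instructions for everything on unoptimizing_list and is expected to move the processed entries onto free_list itself, which is why the explicit list_move() below is gone; the caller then only walks free_list to disarm disabled probes and unhash the fully unused ones. A sketch of the expected contract (unoptimize_one() is a hypothetical stand-in):

    void __kprobes arch_unoptimize_kprobes(struct list_head *oplist,
                                           struct list_head *free_list)
    {
            struct optimized_kprobe *op, *tmp;

            list_for_each_entry_safe(op, tmp, oplist, list) {
                    unoptimize_one(op);                     /* hypothetical helper */
                    list_move(&op->list, free_list);        /* hand back to caller */
            }
    }
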
@@ -598,8 +589,12 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
        mutex_unlock(&kprobe_mutex);
        mutex_unlock(&module_mutex);
 
-       /* Wake up all waiters */
-       complete_all(&optimizer_comp);
+       /* Step 5: Kick optimizer again if needed */
+       if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
+               kick_kprobe_optimizer();
+       else
+               /* Wake up all waiters */
+               complete_all(&optimizer_comp);
 }
 
 /* Wait for completing optimization and unoptimization */
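Step 5 is new: if more probes were queued for (un)optimization while the optimizer was running, it re-kicks itself instead of declaring completion, and optimizer_comp is only completed once both lists are empty, so wait_for_kprobe_optimizer() really waits for a quiescent state. A sketch of the re-kick idiom, using the delayed work and delay defined earlier in this file:

    static void example_kick_optimizer(void)
    {
            if (!delayed_work_pending(&optimizing_work))
                    schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
    }
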
@@ -692,6 +687,27 @@ static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
        }
 }
 
+/* Cancel unoptimizing for reusing */
+static void reuse_unused_kprobe(struct kprobe *ap)
+{
+       struct optimized_kprobe *op;
+
+       BUG_ON(!kprobe_unused(ap));
+       /*
+        * Unused kprobe MUST be on the way of delayed unoptimizing (means
+        * there is still a relative jump) and disabled.
+        */
+       op = container_of(ap, struct optimized_kprobe, kp);
+       if (unlikely(list_empty(&op->list)))
+               printk(KERN_WARNING "Warning: found a stray unused "
+                       "aggrprobe@%p\n", ap->addr);
+       /* Enable the probe again */
+       ap->flags &= ~KPROBE_FLAG_DISABLED;
+       /* Optimize it again (remove from op->list) */
+       BUG_ON(!kprobe_optready(ap));
+       optimize_kprobe(ap);
+}
+
 /* Remove optimized instructions */
 static void __kprobes kill_optimized_kprobe(struct kprobe *p)
 {
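reuse_unused_kprobe() covers the window where an aggregated probe has lost all its users (it is "unused") but is still queued for delayed unoptimization, and a new probe is registered at the same address before the optimizer has run. Instead of refusing with -EBUSY, the probe is re-enabled and sent back through optimize_kprobe(), which also takes it off the unoptimizing list; the caller is the register_aggr_kprobe() hunk further down. Roughly the user-visible scenario (illustration only; "do_fork" is just an example target, and whether the old probe still lingers depends on optimizer timing):

    static struct kprobe example_kp1 = { .symbol_name = "do_fork" };
    static struct kprobe example_kp2 = { .symbol_name = "do_fork" };

    static int example_reregister(void)
    {
            int ret = register_kprobe(&example_kp1);

            if (ret)
                    return ret;
            unregister_kprobe(&example_kp1);        /* probe may linger as "unused" */
            return register_kprobe(&example_kp2);   /* used to risk -EBUSY, now rescued */
    }
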
@@ -872,6 +888,13 @@ static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p)                     kprobe_disabled(p)
 #define wait_for_kprobe_optimizer()            do {} while (0)
 
+/* There should be no unused kprobes can be reused without optimization */
+static void reuse_unused_kprobe(struct kprobe *ap)
+{
+       printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
+       BUG_ON(kprobe_unused(ap));
+}
+
 static __kprobes void free_aggr_kprobe(struct kprobe *p)
 {
        arch_remove_kprobe(p);
@@ -942,7 +965,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
 {
-       struct kprobe *cur = __get_cpu_var(kprobe_instance);
+       struct kprobe *cur = __this_cpu_read(kprobe_instance);
 
        /*
         * if we faulted "during" the execution of a user specified
@@ -957,7 +980,7 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-       struct kprobe *cur = __get_cpu_var(kprobe_instance);
+       struct kprobe *cur = __this_cpu_read(kprobe_instance);
        int ret = 0;
 
        if (cur && cur->break_handler) {
@@ -990,9 +1013,9 @@ void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
        hlist_del(&ri->hlist);
        INIT_HLIST_NODE(&ri->hlist);
        if (likely(rp)) {
-               spin_lock(&rp->lock);
+               raw_spin_lock(&rp->lock);
                hlist_add_head(&ri->hlist, &rp->free_instances);
-               spin_unlock(&rp->lock);
+               raw_spin_unlock(&rp->lock);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
@@ -1003,19 +1026,19 @@ void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
 __acquires(hlist_lock)
 {
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-       spinlock_t *hlist_lock;
+       raw_spinlock_t *hlist_lock;
 
        *head = &kretprobe_inst_table[hash];
        hlist_lock = kretprobe_table_lock_ptr(hash);
-       spin_lock_irqsave(hlist_lock, *flags);
+       raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
        unsigned long *flags)
 __acquires(hlist_lock)
 {
-       spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-       spin_lock_irqsave(hlist_lock, *flags);
+       raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+       raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
@@ -1023,18 +1046,18 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 __releases(hlist_lock)
 {
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-       spinlock_t *hlist_lock;
+       raw_spinlock_t *hlist_lock;
 
        hlist_lock = kretprobe_table_lock_ptr(hash);
-       spin_unlock_irqrestore(hlist_lock, *flags);
+       raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_unlock(unsigned long hash,
        unsigned long *flags)
 __releases(hlist_lock)
 {
-       spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-       spin_unlock_irqrestore(hlist_lock, *flags);
+       raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+       raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 /*
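With these conversions the whole kretprobe hash-locking API (kretprobe_hash_lock/unlock and kretprobe_table_lock/unlock) is raw-spinlock based, so the return-trampoline path stays non-sleeping on PREEMPT_RT as well. The usual caller pattern, roughly as seen in the arch trampoline handlers (sketch only):

    static void example_walk_instances(void)
    {
            struct kretprobe_instance *ri;
            struct hlist_head *head;
            struct hlist_node *node, *tmp;
            unsigned long flags;

            kretprobe_hash_lock(current, &head, &flags);
            hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                    if (ri->task != current)
                            continue;       /* bucket may hold other tasks' instances */
                    /* ... recover the saved return address from ri ... */
            }
            kretprobe_hash_unlock(current, &flags);
    }
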
@@ -1054,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;
 
+       INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
@@ -1062,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        kretprobe_table_unlock(hash, &flags);
-       INIT_HLIST_HEAD(&empty_rp);
        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
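Moving INIT_HLIST_HEAD(&empty_rp) above the locked loop fixes a use-before-init: recycle_rp_inst() already adds orphaned instances to empty_rp while the table lock is held, so the list head must be initialized before that walk, not after it. The corrected shape of the function (sketch mirroring the code above):

    static void example_flush(struct task_struct *tk)
    {
            struct hlist_head empty_rp;
            unsigned long hash, flags;

            INIT_HLIST_HEAD(&empty_rp);                     /* init before use */
            hash = hash_ptr(tk, KPROBE_HASH_BITS);
            kretprobe_table_lock(hash, &flags);
            /* ... recycle_rp_inst(ri, &empty_rp) for instances of tk ... */
            kretprobe_table_unlock(hash, &flags);
            /* ... kfree() everything collected on empty_rp ... */
    }
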
@@ -1173,8 +1196,8 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
                        return -ENOMEM;
                init_aggr_kprobe(ap, orig_p);
        } else if (kprobe_unused(ap))
-               /* Busy to die */
-               return -EBUSY;
+               /* This probe is going to die. Rescue it */
+               reuse_unused_kprobe(ap);
 
        if (kprobe_gone(ap)) {
                /*
@@ -1232,19 +1255,29 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
 /*
  * If we have a symbol_name argument, look it up and add the offset field
  * to it. This way, we can specify a relative address to a symbol.
+ * This returns encoded errors if it fails to look up symbol or invalid
+ * combination of parameters.
  */
 static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
 {
        kprobe_opcode_t *addr = p->addr;
+
+       if ((p->symbol_name && p->addr) ||
+           (!p->symbol_name && !p->addr))
+               goto invalid;
+
        if (p->symbol_name) {
-               if (addr)
-                       return NULL;
                kprobe_lookup_name(p->symbol_name, addr);
+               if (!addr)
+                       return ERR_PTR(-ENOENT);
        }
 
-       if (!addr)
-               return NULL;
-       return (kprobe_opcode_t *)(((char *)addr) + p->offset);
+       addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
+       if (addr)
+               return addr;
+
+invalid:
+       return ERR_PTR(-EINVAL);
 }
 
 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
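kprobe_addr() now reports why it failed, ERR_PTR-encoded: -EINVAL for an invalid combination of symbol_name/addr/offset and -ENOENT when the symbol lookup finds nothing; its callers (register_kprobe() and register_kretprobe() below) switch to IS_ERR()/PTR_ERR(). The generic pattern, with a hypothetical example_lookup() standing in:

    #include <linux/err.h>

    static void *example_lookup(const char *name)
    {
            if (!name)
                    return ERR_PTR(-EINVAL);        /* invalid arguments */
            /* ... do the lookup; on failure: ... */
            return ERR_PTR(-ENOENT);
    }

    static long example_caller(void)
    {
            void *addr = example_lookup(NULL);

            if (IS_ERR(addr))
                    return PTR_ERR(addr);           /* propagates -EINVAL here */
            return 0;
    }
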
@@ -1288,8 +1321,8 @@ int __kprobes register_kprobe(struct kprobe *p)
        kprobe_opcode_t *addr;
 
        addr = kprobe_addr(p);
-       if (!addr)
-               return -EINVAL;
+       if (IS_ERR(addr))
+               return PTR_ERR(addr);
        p->addr = addr;
 
        ret = check_kprobe_rereg(p);
@@ -1301,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr) ||
            ftrace_text_reserved(p->addr, p->addr) ||
-           jump_label_text_reserved(p->addr, p->addr))
-               goto fail_with_jump_label;
+           jump_label_text_reserved(p->addr, p->addr)) {
+               ret = -EINVAL;
+               goto cannot_probe;
+       }
 
        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
        p->flags &= KPROBE_FLAG_DISABLED;
@@ -1312,12 +1347,14 @@ int __kprobes register_kprobe(struct kprobe *p)
         */
        probed_mod = __module_text_address((unsigned long) p->addr);
        if (probed_mod) {
+               /* Return -ENOENT if fail. */
+               ret = -ENOENT;
                /*
                 * We must hold a refcount of the probed module while updating
                 * its code to prohibit unexpected unloading.
                 */
                if (unlikely(!try_module_get(probed_mod)))
-                       goto fail_with_jump_label;
+                       goto cannot_probe;
 
                /*
                 * If the module freed .init.text, we couldn't insert
@@ -1326,8 +1363,9 @@ int __kprobes register_kprobe(struct kprobe *p)
                if (within_module_init((unsigned long)p->addr, probed_mod) &&
                    probed_mod->state != MODULE_STATE_COMING) {
                        module_put(probed_mod);
-                       goto fail_with_jump_label;
+                       goto cannot_probe;
                }
+               /* ret will be updated by following code */
        }
        preempt_enable();
        jump_label_unlock();
@@ -1373,10 +1411,10 @@ out:
 
        return ret;
 
-fail_with_jump_label:
+cannot_probe:
        preempt_enable();
        jump_label_unlock();
-       return -EINVAL;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
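The old fail_with_jump_label exit unconditionally returned -EINVAL; the renamed cannot_probe label returns whatever ret was set before the jump, so a failed try_module_get() or a probe aimed at a module's already-freed .init.text now reports -ENOENT, while reserved or unsafe addresses keep reporting -EINVAL. Callers can tell the cases apart, e.g. (illustration only):

    static void example_report(struct kprobe *kp)
    {
            int ret = register_kprobe(kp);

            if (ret == -ENOENT)
                    printk(KERN_INFO "symbol or module text not available\n");
            else if (ret == -EINVAL)
                    printk(KERN_INFO "address cannot be probed\n");
    }
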
 
@@ -1627,18 +1665,22 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 
        /*TODO: consider to only swap the RA after the last pre_handler fired */
        hash = hash_ptr(current, KPROBE_HASH_BITS);
-       spin_lock_irqsave(&rp->lock, flags);
+       raw_spin_lock_irqsave(&rp->lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
                ri = hlist_entry(rp->free_instances.first,
                                struct kretprobe_instance, hlist);
                hlist_del(&ri->hlist);
-               spin_unlock_irqrestore(&rp->lock, flags);
+               raw_spin_unlock_irqrestore(&rp->lock, flags);
 
                ri->rp = rp;
                ri->task = current;
 
-               if (rp->entry_handler && rp->entry_handler(ri, regs))
+               if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+                       raw_spin_lock_irqsave(&rp->lock, flags);
+                       hlist_add_head(&ri->hlist, &rp->free_instances);
+                       raw_spin_unlock_irqrestore(&rp->lock, flags);
                        return 0;
+               }
 
                arch_prepare_kretprobe(ri, regs);
 
@@ -1649,7 +1691,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                kretprobe_table_unlock(hash, &flags);
        } else {
                rp->nmissed++;
-               spin_unlock_irqrestore(&rp->lock, flags);
+               raw_spin_unlock_irqrestore(&rp->lock, flags);
        }
        return 0;
 }
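Previously, when a kretprobe's entry_handler returned non-zero ("skip this hit"), the instance already taken off free_instances was simply dropped, so every skipped hit leaked one of the rp->maxactive slots until the probe stopped firing. The fix puts the instance back on rp->free_instances under the raw lock before bailing out. For reference, an entry_handler that filters hits looks like this (sketch; the target symbol and the filter are examples only):

    static int example_entry_handler(struct kretprobe_instance *ri,
                                     struct pt_regs *regs)
    {
            if (strcmp(current->comm, "bash"))
                    return 1;       /* non-zero: skip this hit, instance is recycled */
            return 0;               /* 0: arm the return probe for this hit */
    }

    static int example_ret_handler(struct kretprobe_instance *ri,
                                   struct pt_regs *regs)
    {
            /* runs on return for the hits that were not skipped */
            return 0;
    }

    static struct kretprobe example_rp = {
            .kp.symbol_name = "do_fork",            /* example target */
            .entry_handler  = example_entry_handler,
            .handler        = example_ret_handler,
            .maxactive      = 16,
    };
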
@@ -1663,8 +1705,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
        if (kretprobe_blacklist_size) {
                addr = kprobe_addr(&rp->kp);
-               if (!addr)
-                       return -EINVAL;
+               if (IS_ERR(addr))
+                       return PTR_ERR(addr);
 
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        if (kretprobe_blacklist[i].addr == addr)
@@ -1685,7 +1727,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
                rp->maxactive = num_possible_cpus();
 #endif
        }
-       spin_lock_init(&rp->lock);
+       raw_spin_lock_init(&rp->lock);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1923,7 +1965,7 @@ static int __init init_kprobes(void)
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-               spin_lock_init(&(kretprobe_table_locks[i].lock));
+               raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
        }
 
        /*
@@ -2162,7 +2204,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
               const char __user *user_buf, size_t count, loff_t *ppos)
 {
        char buf[32];
-       int buf_size;
+       size_t buf_size;
 
        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
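buf_size changes from int to size_t, matching both count and the size_t result of min() here (its operands, count and sizeof(buf)-1, are already size_t), so the value is no longer narrowed into a signed int before being handed to copy_from_user(). The consistent-type version in isolation (sketch):

    static size_t example_clamp(size_t count)
    {
            char buf[32];
            size_t buf_size = min(count, sizeof(buf) - 1);  /* all operands size_t */

            buf[buf_size] = '\0';   /* buf stays NUL-terminated after a copy */
            return buf_size;
    }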