kmod: prevent kmod_loop_msg overflow in __request_module()
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 7efba6f..a4bea97 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -25,6 +25,7 @@
 #include <linux/kmod.h>
 #include <linux/slab.h>
 #include <linux/completion.h>
+#include <linux/cred.h>
 #include <linux/file.h>
 #include <linux/fdtable.h>
 #include <linux/workqueue.h>
@@ -43,6 +44,13 @@ extern int max_threads;
 
 static struct workqueue_struct *khelper_wq;
 
+#define CAP_BSET       (void *)1
+#define CAP_PI         (void *)2
+
+static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
+static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
+static DEFINE_SPINLOCK(umh_sysctl_lock);
+
 #ifdef CONFIG_MODULES
 
 /*
@@ -106,10 +114,12 @@ int __request_module(bool wait, const char *fmt, ...)
        atomic_inc(&kmod_concurrent);
        if (atomic_read(&kmod_concurrent) > max_modprobes) {
                /* We may be blaming an innocent here, but unlikely */
-               if (kmod_loop_msg++ < 5)
+               if (kmod_loop_msg < 5) {
                        printk(KERN_ERR
                               "request_module: runaway loop modprobe %s\n",
                               module_name);
+                       kmod_loop_msg++;
+               }
                atomic_dec(&kmod_concurrent);
                return -ENOMEM;
        }
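
This is the change named in the subject line: the old code incremented kmod_loop_msg on every throttled request, so a persistent modprobe loop would keep growing the signed counter until it eventually overflowed; once it wrapped, the "runaway loop" message would start printing again. Incrementing only while the counter is below 5 makes it saturate. A minimal user-space model of the two patterns (illustrative names, not kernel code):

#include <stdio.h>

static int loop_msg_old, loop_msg_new;

static void report_old(void)
{
	if (loop_msg_old++ < 5)		/* counter grows on every call, can overflow */
		printf("old: runaway loop\n");
}

static void report_new(void)
{
	if (loop_msg_new < 5) {		/* counter saturates at 5 */
		printf("new: runaway loop\n");
		loop_msg_new++;
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++) {
		report_old();
		report_new();
	}
	printf("old=%d new=%d\n", loop_msg_old, loop_msg_new);	/* old=10 new=5 */
	return 0;
}

Both variants print the message at most five times, but only the new one keeps the counter bounded.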
@@ -132,6 +142,7 @@ EXPORT_SYMBOL(__request_module);
 static int ____call_usermodehelper(void *data)
 {
        struct subprocess_info *sub_info = data;
+       struct cred *new;
        int retval;
 
        spin_lock_irq(&current->sighand->siglock);
@@ -147,13 +158,30 @@ static int ____call_usermodehelper(void *data)
         */
        set_user_nice(current, 0);
 
+       retval = -ENOMEM;
+       new = prepare_kernel_cred(current);
+       if (!new)
+               goto fail;
+
+       spin_lock(&umh_sysctl_lock);
+       new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
+       new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
+                                            new->cap_inheritable);
+       spin_unlock(&umh_sysctl_lock);
+
        if (sub_info->init) {
-               retval = sub_info->init(sub_info);
-               if (retval)
+               retval = sub_info->init(sub_info, new);
+               if (retval) {
+                       abort_creds(new);
                        goto fail;
+               }
        }
 
-       retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
+       commit_creds(new);
+
+       retval = kernel_execve(sub_info->path,
+                              (const char *const *)sub_info->argv,
+                              (const char *const *)sub_info->envp);
 
        /* Exec failed? */
 fail:
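
Before the exec, the helper thread now builds a fresh credential with prepare_kernel_cred(), intersects its bounding and inheritable capability sets with the sysctl-controlled masks under umh_sysctl_lock, and only then commits it. Since cap_intersect() is a word-wise AND over the capability words, the masks can only remove capabilities from the helper, never add them. A rough user-space model of that intersection (the struct and function names here are illustrative; only cap_intersect and _KERNEL_CAPABILITY_U32S correspond to real kernel identifiers):

#include <stdio.h>
#include <stdint.h>

#define CAP_WORDS 2	/* _KERNEL_CAPABILITY_U32S is 2 for 64-bit capability sets */

struct model_cap {
	uint32_t cap[CAP_WORDS];
};

/* word-wise AND, like the kernel's cap_intersect() */
static struct model_cap model_intersect(struct model_cap a, struct model_cap b)
{
	struct model_cap dest;
	int i;

	for (i = 0; i < CAP_WORDS; i++)
		dest.cap[i] = a.cap[i] & b.cap[i];
	return dest;
}

int main(void)
{
	struct model_cap full = { { 0xffffffffu, 0xffffffffu } };	/* CAP_FULL_SET analogue   */
	struct model_cap mask = { { 0x000000ffu, 0x00000000u } };	/* admin-restricted "bset" */
	struct model_cap bset = model_intersect(mask, full);

	/* only bits set in both inputs survive */
	printf("helper bset: %08x %08x\n", bset.cap[1], bset.cap[0]);
	return 0;
}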
@@ -175,16 +203,16 @@ static int wait_for_helper(void *data)
        struct subprocess_info *sub_info = data;
        pid_t pid;
 
-       /* Install a handler: if SIGCLD isn't handled sys_wait4 won't
-        * populate the status, but will return -ECHILD. */
-       allow_signal(SIGCHLD);
+       /* If SIGCLD is ignored sys_wait4 won't populate the status. */
+       spin_lock_irq(&current->sighand->siglock);
+       current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
+       spin_unlock_irq(&current->sighand->siglock);
 
        pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
        if (pid < 0) {
                sub_info->retval = pid;
        } else {
-               int ret;
-
+               int ret = -ECHILD;
                /*
                 * Normally it is bogus to call wait4() from in-kernel because
                 * wait4() wants to write the exit code to a userspace address.
@@ -205,10 +233,7 @@ static int wait_for_helper(void *data)
                        sub_info->retval = ret;
        }
 
-       if (sub_info->wait == UMH_NO_WAIT)
-               call_usermodehelper_freeinfo(sub_info);
-       else
-               complete(sub_info->complete);
+       complete(sub_info->complete);
        return 0;
 }
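
The rewritten wait_for_helper() no longer installs a dummy handler via allow_signal(); it resets the SIGCHLD disposition to SIG_DFL under siglock instead. The point, as the new comment says, is that while SIGCHLD is ignored the child is auto-reaped and sys_wait4() cannot return its exit status, it fails with -ECHILD (which is also the new default for ret). The same rule is visible from user space on Linux, as this small demonstration shows:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static void run_child_and_wait(const char *label)
{
	pid_t pid = fork();
	int status;

	if (pid < 0)
		return;
	if (pid == 0)
		_exit(42);		/* child exits with a known status */

	if (waitpid(pid, &status, 0) < 0)
		printf("%s: waitpid failed: %s\n", label, strerror(errno));
	else
		printf("%s: child exited with %d\n", label, WEXITSTATUS(status));
}

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* children auto-reaped: waitpid() fails with ECHILD */
	run_child_and_wait("SIG_IGN");

	signal(SIGCHLD, SIG_DFL);	/* normal wait semantics: status is collected */
	run_child_and_wait("SIG_DFL");
	return 0;
}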
 
@@ -217,13 +242,13 @@ static void __call_usermodehelper(struct work_struct *work)
 {
        struct subprocess_info *sub_info =
                container_of(work, struct subprocess_info, work);
-       pid_t pid;
        enum umh_wait wait = sub_info->wait;
+       pid_t pid;
 
        /* CLONE_VFORK: wait until the usermode helper has execve'd
 * successfully. We need the data structures to stay around
         * until that is done.  */
-       if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT)
+       if (wait == UMH_WAIT_PROC)
                pid = kernel_thread(wait_for_helper, sub_info,
                                    CLONE_FS | CLONE_FILES | SIGCHLD);
        else
@@ -232,26 +257,26 @@ static void __call_usermodehelper(struct work_struct *work)
 
        switch (wait) {
        case UMH_NO_WAIT:
+               call_usermodehelper_freeinfo(sub_info);
                break;
 
        case UMH_WAIT_PROC:
                if (pid > 0)
                        break;
-               sub_info->retval = pid;
                /* FALLTHROUGH */
-
        case UMH_WAIT_EXEC:
+               if (pid < 0)
+                       sub_info->retval = pid;
                complete(sub_info->complete);
        }
 }
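
After this hunk, UMH_NO_WAIT helpers are spawned directly (no intermediate wait_for_helper() thread) and their subprocess_info is freed right here in the worker, while a failed kernel_thread() is reported through sub_info->retval for both waiting modes before the completion fires. From the caller's side the three modes look roughly like this; the sketch assumes the call_usermodehelper() inline wrapper of this kernel generation, int call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait), and a hypothetical helper binary:

static int run_helper_example(void)
{
	char *argv[] = { "/sbin/example-helper", "arg", NULL };	/* hypothetical binary */
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
	int ret;

	/* Fire and forget: returns once the work is queued; the
	 * subprocess_info is freed by __call_usermodehelper(). */
	ret = call_usermodehelper(argv[0], argv, envp, UMH_NO_WAIT);

	/* Wait until the exec has been attempted; a kernel_thread()
	 * or exec failure comes back through the completion. */
	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	/* Wait for the helper process to exit and collect its status
	 * (this path goes through wait_for_helper()). */
	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);

	return ret;
}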
 
-#ifdef CONFIG_PM_SLEEP
 /*
  * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
  * (used for preventing user land processes from being created after the user
  * land has been frozen during a system-wide hibernation or suspend operation).
  */
-static int usermodehelper_disabled;
+static int usermodehelper_disabled = 1;
 
 /* Number of helpers running */
 static atomic_t running_helpers = ATOMIC_INIT(0);
@@ -301,6 +326,15 @@ void usermodehelper_enable(void)
        usermodehelper_disabled = 0;
 }
 
+/**
+ * usermodehelper_is_disabled - check if new helpers are allowed to be started
+ */
+bool usermodehelper_is_disabled(void)
+{
+       return usermodehelper_disabled;
+}
+EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);
+
 static void helper_lock(void)
 {
        atomic_inc(&running_helpers);
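
With the CONFIG_PM_SLEEP guards removed (here and in the following hunk), the disable/enable machinery is always built, usermodehelper_disabled now starts out set so no helper can run before usermodehelper_enable() is called during boot, and the new usermodehelper_is_disabled() lets other code check the flag instead of blocking while userspace is frozen. The firmware loader is the sort of caller this is aimed at; a hypothetical use, with the same call_usermodehelper() wrapper assumed above:

/*
 * Hypothetical kernel-side caller: bail out with -EBUSY instead of
 * blocking when usermode helpers are unavailable, e.g. while
 * userspace is frozen for suspend or hibernation.
 */
static int example_start_helper(char *path, char **argv, char **envp)
{
	if (usermodehelper_is_disabled())
		return -EBUSY;

	return call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
}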
@@ -312,12 +346,6 @@ static void helper_unlock(void)
        if (atomic_dec_and_test(&running_helpers))
                wake_up(&running_helpers_waitq);
 }
-#else /* CONFIG_PM_SLEEP */
-#define usermodehelper_disabled        0
-
-static inline void helper_lock(void) {}
-static inline void helper_unlock(void) {}
-#endif /* CONFIG_PM_SLEEP */
 
 /**
  * call_usermodehelper_setup - prepare to call a usermode helper
@@ -364,7 +392,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
  * context in which call_usermodehelper_exec is called.
  */
 void call_usermodehelper_setfns(struct subprocess_info *info,
-                   int (*init)(struct subprocess_info *info),
+                   int (*init)(struct subprocess_info *info, struct cred *new),
                    void (*cleanup)(struct subprocess_info *info),
                    void *data)
 {
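
The init callback's signature gains the not-yet-committed struct cred of the helper thread: it runs between prepare_kernel_cred() and commit_creds() (see the earlier hunk), so a caller can both set up resources for the child and adjust its credentials, and a non-zero return aborts the creds and fails the exec. A hypothetical callback under the new signature (example only, not taken from the patch):

static int example_helper_init(struct subprocess_info *info, struct cred *new)
{
	/* trim the helper's bounding set before it execs */
	cap_lower(new->cap_bset, CAP_SYS_ADMIN);
	return 0;	/* non-zero => abort_creds(new), helper never execs */
}

static void example_helper_cleanup(struct subprocess_info *info)
{
	kfree(info->data);
}

/* wired up by the caller: */
/* call_usermodehelper_setfns(info, example_helper_init, example_helper_cleanup, data); */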
@@ -418,6 +446,84 @@ unlock:
 }
 EXPORT_SYMBOL(call_usermodehelper_exec);
 
+static int proc_cap_handler(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct ctl_table t;
+       unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
+       kernel_cap_t new_cap;
+       int err, i;
+
+       if (write && (!capable(CAP_SETPCAP) ||
+                     !capable(CAP_SYS_MODULE)))
+               return -EPERM;
+
+       /*
+        * convert from the global kernel_cap_t to the ulong array to print to
+        * userspace if this is a read.
+        */
+       spin_lock(&umh_sysctl_lock);
+       for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
+               if (table->data == CAP_BSET)
+                       cap_array[i] = usermodehelper_bset.cap[i];
+               else if (table->data == CAP_PI)
+                       cap_array[i] = usermodehelper_inheritable.cap[i];
+               else
+                       BUG();
+       }
+       spin_unlock(&umh_sysctl_lock);
+
+       t = *table;
+       t.data = &cap_array;
+
+       /*
+        * actually read or write an array of ulongs from userspace.  Remember
+        * these are least significant 32 bits first
+        */
+       err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
+       if (err < 0)
+               return err;
+
+       /*
+        * convert from the sysctl array of ulongs to the kernel_cap_t
+        * internal representation
+        */
+       for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
+               new_cap.cap[i] = cap_array[i];
+
+       /*
+        * Drop everything not in the new_cap (but don't add things)
+        */
+       spin_lock(&umh_sysctl_lock);
+       if (write) {
+               if (table->data == CAP_BSET)
+                       usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
+               if (table->data == CAP_PI)
+                       usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
+       }
+       spin_unlock(&umh_sysctl_lock);
+
+       return 0;
+}
+
+struct ctl_table usermodehelper_table[] = {
+       {
+               .procname       = "bset",
+               .data           = CAP_BSET,
+               .maxlen         = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
+               .mode           = 0600,
+               .proc_handler   = proc_cap_handler,
+       },
+       {
+               .procname       = "inheritable",
+               .data           = CAP_PI,
+               .maxlen         = _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
+               .mode           = 0600,
+               .proc_handler   = proc_cap_handler,
+       },
+       { }
+};
+
 void __init usermodehelper_init(void)
 {
        khelper_wq = create_singlethread_workqueue("khelper");
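
usermodehelper_table above only defines the two leaf entries; registration happens outside this file (presumably an entry added to the kernel sysctl table in the companion change), so the knobs appear as /proc/sys/kernel/usermodehelper/bset and /proc/sys/kernel/usermodehelper/inheritable. The CAP_BSET and CAP_PI sentinel pointers stored in .data are just tags telling proc_cap_handler() which global mask an entry refers to. Reads print the masks as space-separated unsigned longs, least significant 32 bits first; writes require both CAP_SETPCAP and CAP_SYS_MODULE and are intersected into the existing masks, so capabilities can only ever be dropped, never regained. A sketch of the assumed directory hookup, using the .child linkage of this era's sysctl tables:

/* assumed registration site (not part of this file or this diff) */
static struct ctl_table example_kern_table[] = {
	{
		.procname	= "usermodehelper",
		.mode		= 0555,
		.child		= usermodehelper_table,
	},
	{ }
};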