[PATCH] Fix signal sending in usbdevio on async URB completion
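usbdevio delivers a signal to the submitting process when an async URB
completes.  The completion handler runs in interrupt context, where "current"
is whichever task happens to be running, so permission-checking the signal
against current's uid/euid (as kill_proc_info() does) is wrong.  Fix this by
capturing the submitter's uid/euid at URB submission time and sending the
completion signal through a new helper, kill_proc_info_as_uid(), which checks
against caller-supplied credentials instead of current's.

(Being a blobdiff between two blob revisions, this view also sweeps in
neighbouring kernel/signal.c changes from the same window: the gfp_t
conversion of __sigqueue_alloc(), the wants_signal() macro-to-inline
conversion, the do_notify_parent_cldstop() refactor, and the
schedule_timeout_interruptible() conversion.)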
diff --git a/kernel/signal.c b/kernel/signal.c
index ca1186eef9380cd5e633f644a0891d1dacc5bd16..50c992643771e033c3833e42066fd3218f812d27 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -262,7 +262,7 @@ next_signal(struct sigpending *pending, sigset_t *mask)
        return sig;
 }
 
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
                                         int override_rlimit)
 {
        struct sigqueue *q = NULL;
@@ -578,7 +578,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
-               tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+               if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+                       tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ( signr &&
             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
@@ -678,7 +679,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
 
 /* forward decl */
 static void do_notify_parent_cldstop(struct task_struct *tsk,
-                                    struct task_struct *parent,
+                                    int to_self,
                                     int why);
 
 /*
@@ -692,7 +693,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 {
        struct task_struct *t;
 
-       if (p->flags & SIGNAL_GROUP_EXIT)
+       if (p->signal->flags & SIGNAL_GROUP_EXIT)
                /*
                 * The process is in the middle of dying already.
                 */
@@ -729,14 +730,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                        p->signal->group_stop_count = 0;
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        spin_unlock(&p->sighand->siglock);
-                       if (p->ptrace & PT_PTRACED)
-                               do_notify_parent_cldstop(p, p->parent,
-                                                        CLD_STOPPED);
-                       else
-                               do_notify_parent_cldstop(
-                                       p->group_leader,
-                                       p->group_leader->real_parent,
-                                                        CLD_STOPPED);
+                       do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
                        spin_lock(&p->sighand->siglock);
                }
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -777,14 +771,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        p->signal->group_exit_code = 0;
                        spin_unlock(&p->sighand->siglock);
-                       if (p->ptrace & PT_PTRACED)
-                               do_notify_parent_cldstop(p, p->parent,
-                                                        CLD_CONTINUED);
-                       else
-                               do_notify_parent_cldstop(
-                                       p->group_leader,
-                                       p->group_leader->real_parent,
-                                                        CLD_CONTINUED);
+                       do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
                        spin_lock(&p->sighand->siglock);
                } else {
                        /*
@@ -950,34 +937,31 @@ force_sig_specific(int sig, struct task_struct *t)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-#define wants_signal(sig, p, mask)                     \
-       (!sigismember(&(p)->blocked, sig)               \
-        && !((p)->state & mask)                        \
-        && !((p)->flags & PF_EXITING)                  \
-        && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+       if (sigismember(&p->blocked, sig))
+               return 0;
+       if (p->flags & PF_EXITING)
+               return 0;
+       if (sig == SIGKILL)
+               return 1;
+       if (p->state & (TASK_STOPPED | TASK_TRACED))
+               return 0;
+       return task_curr(p) || !signal_pending(p);
+}
 
 static void
 __group_complete_signal(int sig, struct task_struct *p)
 {
-       unsigned int mask;
        struct task_struct *t;
 
-       /*
-        * Don't bother traced and stopped tasks (but
-        * SIGKILL will punch through that).
-        */
-       mask = TASK_STOPPED | TASK_TRACED;
-       if (sig == SIGKILL)
-               mask = 0;
-
        /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
-       if (wants_signal(sig, p, mask))
+       if (wants_signal(sig, p))
                t = p;
        else if (thread_group_empty(p))
                /*
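A userspace illustration of the delivery rule the comments above describe (a
self-contained sketch, not part of the patch): a process-directed signal is
handed to some thread that does not block it, which is exactly what
wants_signal() selects for.

	#include <pthread.h>
	#include <signal.h>
	#include <unistd.h>

	static void handler(int sig)
	{
		/* write() is async-signal-safe; printf() is not */
		static const char msg[] = "delivered to an unblocking thread\n";
		write(1, msg, sizeof(msg) - 1);
	}

	/* this thread blocks SIGUSR1, so wants_signal() is false for it */
	static void *blocker(void *arg)
	{
		sigset_t set;

		sigemptyset(&set);
		sigaddset(&set, SIGUSR1);
		pthread_sigmask(SIG_BLOCK, &set, NULL);
		pause();
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		signal(SIGUSR1, handler);
		pthread_create(&t, NULL, blocker, NULL);
		sleep(1);
		/* process-directed kill(): the kernel picks a thread whose
		 * mask does not block the signal -- here, the main thread */
		kill(getpid(), SIGUSR1);
		sleep(1);
		return 0;
	}

Build with "cc -pthread"; the handler runs in the main thread even though the
blocker thread exists, matching the __group_complete_signal() walk above.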
@@ -995,7 +979,7 @@ __group_complete_signal(int sig, struct task_struct *p)
                        t = p->signal->curr_target = p;
                BUG_ON(t->tgid != p->tgid);
 
-               while (!wants_signal(sig, t, mask)) {
+               while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == p->signal->curr_target)
                                /*
@@ -1209,6 +1193,40 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
        return error;
 }
 
+/* like kill_proc_info(), but doesn't use uid/euid of "current" */
+int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
+                     uid_t uid, uid_t euid)
+{
+       int ret = -EINVAL;
+       struct task_struct *p;
+
+       if (!valid_signal(sig))
+               return ret;
+
+       read_lock(&tasklist_lock);
+       p = find_task_by_pid(pid);
+       if (!p) {
+               ret = -ESRCH;
+               goto out_unlock;
+       }
+       if ((!info || ((unsigned long)info != 1 &&
+                       (unsigned long)info != 2 && SI_FROMUSER(info)))
+           && (euid != p->suid) && (euid != p->uid)
+           && (uid != p->suid) && (uid != p->uid)) {
+               ret = -EPERM;
+               goto out_unlock;
+       }
+       if (sig && p->sighand) {
+               unsigned long flags;
+               spin_lock_irqsave(&p->sighand->siglock, flags);
+               ret = __group_send_sig_info(sig, info, p);
+               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+       }
+out_unlock:
+       read_unlock(&tasklist_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
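For context, a sketch of how the usbdevio completion path is expected to call
the new helper; this is modeled on drivers/usb/core/devio.c and the struct
async field names (signr, pid, uid, euid, userurb) are assumptions.  The
submitter's pid/uid/euid are saved at URB submission time, while "current" is
still the submitting task, and reused here in interrupt context:

	static void async_completed(struct urb *urb, struct pt_regs *regs)
	{
		struct async *as = (struct async *)urb->context;
		struct siginfo sinfo;

		if (as->signr) {
			sinfo.si_signo = as->signr;
			sinfo.si_errno = as->urb->status;
			sinfo.si_code = SI_ASYNCIO;
			sinfo.si_addr = as->userurb;
			/* check against the submitter's credentials,
			 * not whatever "current" happens to be */
			kill_proc_info_as_uid(as->signr, &sinfo, as->pid,
					      as->uid, as->euid);
		}
		wake_up(&as->ps->wait);
	}

The (unsigned long)info == 1/2 special cases in the permission check preserve
the convention that those pointer values mark kernel-internal siginfo (later
given the names SEND_SIG_PRIV and SEND_SIG_FORCED), which must not be subject
to the uid checks.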
@@ -1380,16 +1398,16 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
        unsigned long flags;
        int ret = 0;
 
-       /*
-        * We need the tasklist lock even for the specific
-        * thread case (when we don't need to follow the group
-        * lists) in order to avoid races with "p->sighand"
-        * going away or changing from under us.
-        */
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-       read_lock(&tasklist_lock);  
+       read_lock(&tasklist_lock);
+
+       if (unlikely(p->flags & PF_EXITING)) {
+               ret = -1;
+               goto out_err;
+       }
+
        spin_lock_irqsave(&p->sighand->siglock, flags);
-       
+
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queue just increment
@@ -1399,7 +1417,7 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                        BUG();
                q->info.si_overrun++;
                goto out;
-       } 
+       }
        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig)) {
                ret = 1;
@@ -1414,8 +1432,10 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 
 out:
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
+out_err:
        read_unlock(&tasklist_lock);
-       return(ret);
+
+       return ret;
 }
 
 int
@@ -1542,14 +1562,20 @@ void do_notify_parent(struct task_struct *tsk, int sig)
        spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-static void
-do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
-                        int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
 {
        struct siginfo info;
        unsigned long flags;
+       struct task_struct *parent;
        struct sighand_struct *sighand;
 
+       if (to_self)
+               parent = tsk->parent;
+       else {
+               tsk = tsk->group_leader;
+               parent = tsk->real_parent;
+       }
+
        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
@@ -1618,8 +1644,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
                   !(current->ptrace & PT_ATTACHED)) &&
            (likely(current->parent->signal != current->signal) ||
             !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
-               do_notify_parent_cldstop(current, current->parent,
-                                        CLD_TRAPPED);
+               do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
@@ -1668,25 +1693,25 @@ void ptrace_notify(int exit_code)
 static void
 finish_stop(int stop_count)
 {
+       int to_self;
+
        /*
         * If there are no other threads in the group, or if there is
         * a group stop in progress and we are the last to stop,
         * report to the parent.  When ptraced, every thread reports itself.
         */
-       if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
-               read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(current, current->parent,
-                                        CLD_STOPPED);
-               read_unlock(&tasklist_lock);
-       }
-       else if (stop_count == 0) {
-               read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(current->group_leader,
-                                        current->group_leader->real_parent,
-                                        CLD_STOPPED);
-               read_unlock(&tasklist_lock);
-       }
+       if (stop_count < 0 || (current->ptrace & PT_PTRACED))
+               to_self = 1;
+       else if (stop_count == 0)
+               to_self = 0;
+       else
+               goto out;
 
+       read_lock(&tasklist_lock);
+       do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
+       read_unlock(&tasklist_lock);
+
+out:
        schedule();
        /*
         * Now we don't run again until continued.
@@ -1773,7 +1798,8 @@ do_signal_stop(int signr)
                                 * stop is always done with the siglock held,
                                 * so this check has no races.
                                 */
-                               if (t->state < TASK_STOPPED) {
+                               if (!t->exit_state &&
+                                   !(t->state & (TASK_STOPPED|TASK_TRACED))) {
                                        stop_count++;
                                        signal_wake_up(t, 0);
                                }
@@ -2228,8 +2254,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);
 
-                       current->state = TASK_INTERRUPTIBLE;
-                       timeout = schedule_timeout(timeout);
+                       timeout = schedule_timeout_interruptible(timeout);
 
                        try_to_freeze();
                        spin_lock_irq(&current->sighand->siglock);
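
The last hunk folds the open-coded TASK_INTERRUPTIBLE store plus
schedule_timeout() into schedule_timeout_interruptible().  For reference, the
helper added to kernel/timer.c in this same era is just a thin wrapper (a
from-memory sketch):

	signed long __sched schedule_timeout_interruptible(signed long timeout)
	{
		__set_current_state(TASK_INTERRUPTIBLE);
		return schedule_timeout(timeout);
	}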