powerpc: don't duplicate name between vio_driver and device_driver
[linux-3.10.git] kernel/signal.c
index 8f3debc..f2b96b0 100644
@@ -24,6 +24,7 @@
 #include <linux/ptrace.h>
 #include <linux/posix-timers.h>
 #include <linux/signal.h>
+#include <linux/audit.h>
 #include <asm/param.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -212,6 +213,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 fastcall void recalc_sigpending_tsk(struct task_struct *t)
 {
        if (t->signal->group_stop_count > 0 ||
+           (freezing(t)) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked))
                set_tsk_thread_flag(t, TIF_SIGPENDING);
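
Note: the added freezing(t) test forces TIF_SIGPENDING on for tasks the suspend freezer is trying to park, so they drop out of interruptible sleeps and reach the refrigerator promptly. Below is a minimal userspace model of that decision; the struct and field names are invented for illustration and are not kernel types.

/*
 * Userspace model (not kernel code) of the recalc_sigpending_tsk() decision:
 * the "signal work pending" flag is set when the task has unblocked pending
 * signals, a group stop is in progress, or -- with this hunk -- the task is
 * being frozen.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct task_model {
        uint64_t pending;          /* per-thread pending signals (bitmask) */
        uint64_t shared_pending;   /* process-wide pending signals */
        uint64_t blocked;          /* blocked signal mask */
        int group_stop_count;      /* group stop in progress when > 0 */
        bool freezing;             /* suspend freezer wants this task parked */
};

static bool needs_sigpending_flag(const struct task_model *t)
{
        if (t->group_stop_count > 0)
                return true;
        if (t->freezing)                       /* the new check from this hunk */
                return true;
        if (t->pending & ~t->blocked)
                return true;
        if (t->shared_pending & ~t->blocked)
                return true;
        return false;
}

int main(void)
{
        /* Everything blocked, nothing pending: only the freezer check fires. */
        struct task_model t = { .blocked = ~0ull, .freezing = true };
        printf("flag set: %d\n", needs_sigpending_flag(&t));   /* prints 1 */
        return 0;
}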
@@ -260,7 +262,7 @@ next_signal(struct sigpending *pending, sigset_t *mask)
        return sig;
 }
 
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
                                         int override_rlimit)
 {
        struct sigqueue *q = NULL;
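
Note: replacing the `unsigned int __nocast` parameter with gfp_t gives allocation flags a distinct type that sparse can check. As a rough userspace analogy (not the kernel's sparse __bitwise machinery; alloc_flags_t and fake_alloc are made up), wrapping a flag word in its own type keeps unrelated integers from being passed by accident.

#include <stdio.h>

typedef struct { unsigned bits; } alloc_flags_t;   /* hypothetical flags type */

#define AF_WAIT  ((alloc_flags_t){ 0x1 })
#define AF_IO    ((alloc_flags_t){ 0x2 })

static void *fake_alloc(size_t size, alloc_flags_t flags)
{
        printf("alloc %zu bytes, flags 0x%x\n", size, flags.bits);
        return NULL;   /* demo only */
}

int main(void)
{
        fake_alloc(64, AF_WAIT);
        /* fake_alloc(64, 0x1);  <- would not compile: int is not alloc_flags_t */
        return 0;
}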
@@ -395,20 +397,8 @@ void __exit_signal(struct task_struct *tsk)
        flush_sigqueue(&tsk->pending);
        if (sig) {
                /*
-                * We are cleaning up the signal_struct here.  We delayed
-                * calling exit_itimers until after flush_sigqueue, just in
-                * case our thread-local pending queue contained a queued
-                * timer signal that would have been cleared in
-                * exit_itimers.  When that called sigqueue_free, it would
-                * attempt to re-take the tasklist_lock and deadlock.  This
-                * can never happen if we ensure that all queues the
-                * timer's signal might be queued on have been flushed
-                * first.  The shared_pending queue, and our own pending
-                * queue are the only queues the timer could be on, since
-                * there are no other threads left in the group and timer
-                * signals are constrained to threads inside the group.
+                * We are cleaning up the signal_struct here.
                 */
-               exit_itimers(sig);
                exit_thread_group_keys(sig);
                kmem_cache_free(signal_cachep, sig);
        }
@@ -522,7 +512,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 {
        int sig = 0;
 
-       sig = next_signal(pending, mask);
+       /* SIGKILL must have priority, otherwise it is quite easy
+        * to create an unkillable process, sending sig < SIGKILL
+        * to self */
+       if (unlikely(sigismember(&pending->signal, SIGKILL))) {
+               if (!sigismember(mask, SIGKILL))
+                       sig = SIGKILL;
+       }
+
+       if (likely(!sig))
+               sig = next_signal(pending, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
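
Note: the new check looks for SIGKILL before scanning the rest of the pending set, so a process that keeps queueing lower-numbered signals to itself can no longer starve the kill. A simplified userspace model of that ordering follows; pick_signal is invented, and the kernel's next_signal() does the real scan.

#include <signal.h>
#include <stdio.h>

static int pick_signal(const sigset_t *pending, const sigset_t *blocked)
{
        /* SIGKILL first, unless (nonsensically, in this model) blocked. */
        if (sigismember(pending, SIGKILL) && !sigismember(blocked, SIGKILL))
                return SIGKILL;

        /* Otherwise fall back to the lowest-numbered deliverable signal. */
        for (int sig = 1; sig < 64; sig++)   /* 64 is plenty for this demo */
                if (sigismember(pending, sig) && !sigismember(blocked, sig))
                        return sig;
        return 0;
}

int main(void)
{
        sigset_t pending, blocked;

        sigemptyset(&pending);
        sigemptyset(&blocked);
        sigaddset(&pending, SIGHUP);    /* lower-numbered than SIGKILL */
        sigaddset(&pending, SIGKILL);

        /* Without the SIGKILL-first check, the scan would hand back SIGHUP. */
        printf("dequeued: %d (SIGKILL is %d)\n",
               pick_signal(&pending, &blocked), SIGKILL);
        return 0;
}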
@@ -567,7 +566,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
-               tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+               if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+                       tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ( signr &&
             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
@@ -658,12 +658,16 @@ static int check_kill_permission(int sig, struct siginfo *info,
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL))
                return error;
-       return security_task_kill(t, info, sig);
+
+       error = security_task_kill(t, info, sig);
+       if (!error)
+               audit_signal_info(sig, t); /* Let audit system see the signal */
+       return error;
 }
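
Note: check_kill_permission() now reports the signal to the audit subsystem, but only after security_task_kill() has allowed it, so denied attempts are not recorded as deliveries. A tiny standalone model of that ordering; security_check and audit_record are stand-ins, not kernel APIs.

#include <stdio.h>

static int security_check(int sig) { return sig == 0 ? -1 : 0; }   /* stub */
static void audit_record(int sig)  { printf("audit: sig=%d\n", sig); }

static int check_permission(int sig)
{
        int error = security_check(sig);
        if (!error)
                audit_record(sig);   /* only after the check succeeds */
        return error;
}

int main(void)
{
        check_permission(15);   /* permitted by the stub check: audited */
        check_permission(0);    /* denied by the stub check: no audit record */
        return 0;
}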
 
 /* forward decl */
 static void do_notify_parent_cldstop(struct task_struct *tsk,
-                                    struct task_struct *parent,
+                                    int to_self,
                                     int why);
 
 /*
@@ -677,7 +681,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 {
        struct task_struct *t;
 
-       if (p->flags & SIGNAL_GROUP_EXIT)
+       if (p->signal->flags & SIGNAL_GROUP_EXIT)
                /*
                 * The process is in the middle of dying already.
                 */
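
Note: this hunk is a plain bug fix. SIGNAL_GROUP_EXIT lives in signal_struct.flags, not in task_struct.flags, so the old expression compiled but tested an unrelated per-task bit. A minimal illustration of the pitfall, with invented types and flag values.

#include <stdio.h>

/* Invented stand-ins: the two flag words use different bit namespaces. */
#define PF_SOMETHING_DEMO        0x00000004   /* would live in task->flags */
#define SIGNAL_GROUP_EXIT_DEMO   0x00000004   /* lives in task->signal->flags */

struct signal_demo { unsigned flags; };
struct task_demo   { unsigned flags; struct signal_demo *signal; };

int main(void)
{
        struct signal_demo sig = { .flags = 0 };   /* group is not exiting */
        struct task_demo p = { .flags = PF_SOMETHING_DEMO, .signal = &sig };

        /* Old test: wrong flag word, so any coinciding per-task bit matches. */
        printf("old check: %d\n", !!(p.flags & SIGNAL_GROUP_EXIT_DEMO));          /* 1 */
        /* Fixed test: look in the shared signal_struct. */
        printf("new check: %d\n", !!(p.signal->flags & SIGNAL_GROUP_EXIT_DEMO));  /* 0 */
        return 0;
}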
@@ -714,14 +718,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                        p->signal->group_stop_count = 0;
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        spin_unlock(&p->sighand->siglock);
-                       if (p->ptrace & PT_PTRACED)
-                               do_notify_parent_cldstop(p, p->parent,
-                                                        CLD_STOPPED);
-                       else
-                               do_notify_parent_cldstop(
-                                       p->group_leader,
-                                       p->group_leader->real_parent,
-                                                        CLD_STOPPED);
+                       do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
                        spin_lock(&p->sighand->siglock);
                }
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -762,14 +759,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        p->signal->group_exit_code = 0;
                        spin_unlock(&p->sighand->siglock);
-                       if (p->ptrace & PT_PTRACED)
-                               do_notify_parent_cldstop(p, p->parent,
-                                                        CLD_CONTINUED);
-                       else
-                               do_notify_parent_cldstop(
-                                       p->group_leader,
-                                       p->group_leader->real_parent,
-                                                        CLD_CONTINUED);
+                       do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
                        spin_lock(&p->sighand->siglock);
                } else {
                        /*
@@ -935,34 +925,31 @@ force_sig_specific(int sig, struct task_struct *t)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-#define wants_signal(sig, p, mask)                     \
-       (!sigismember(&(p)->blocked, sig)               \
-        && !((p)->state & mask)                        \
-        && !((p)->flags & PF_EXITING)                  \
-        && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+       if (sigismember(&p->blocked, sig))
+               return 0;
+       if (p->flags & PF_EXITING)
+               return 0;
+       if (sig == SIGKILL)
+               return 1;
+       if (p->state & (TASK_STOPPED | TASK_TRACED))
+               return 0;
+       return task_curr(p) || !signal_pending(p);
+}
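
Note: converting the wants_signal() macro into a static inline gets real type checking and single evaluation of the arguments, and lets the SIGKILL special case live inside the helper instead of a caller-supplied state mask. The generic benefit, shown with a throwaway example; TWICE_MAX, max_int and next are invented for the demo.

#include <stdio.h>

/* Macro form: arguments may be evaluated more than once. */
#define TWICE_MAX(a, b) ((a) > (b) ? (a) : (b))

/* Inline-function form: each argument is evaluated exactly once
 * and gets a real type. */
static inline int max_int(int a, int b)
{
        return a > b ? a : b;
}

static int counter;
static int next(void) { return ++counter; }

int main(void)
{
        counter = 0;
        int m = TWICE_MAX(next(), 0);   /* next() runs twice: m == 2 */
        printf("macro: m=%d calls=%d\n", m, counter);

        counter = 0;
        m = max_int(next(), 0);         /* next() runs once: m == 1 */
        printf("inline: m=%d calls=%d\n", m, counter);
        return 0;
}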
 
 static void
 __group_complete_signal(int sig, struct task_struct *p)
 {
-       unsigned int mask;
        struct task_struct *t;
 
        /*
-        * Don't bother traced and stopped tasks (but
-        * SIGKILL will punch through that).
-        */
-       mask = TASK_STOPPED | TASK_TRACED;
-       if (sig == SIGKILL)
-               mask = 0;
-
-       /*
         * Now find a thread we can wake up to take the signal off the queue.
         *
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
-       if (wants_signal(sig, p, mask))
+       if (wants_signal(sig, p))
                t = p;
        else if (thread_group_empty(p))
                /*
@@ -980,7 +967,7 @@ __group_complete_signal(int sig, struct task_struct *p)
                        t = p->signal->curr_target = p;
                BUG_ON(t->tgid != p->tgid);
 
-               while (!wants_signal(sig, t, mask)) {
+               while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == p->signal->curr_target)
                                /*
@@ -1194,6 +1181,40 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
        return error;
 }
 
+/* like kill_proc_info(), but doesn't use uid/euid of "current" */
+int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
+                     uid_t uid, uid_t euid)
+{
+       int ret = -EINVAL;
+       struct task_struct *p;
+
+       if (!valid_signal(sig))
+               return ret;
+
+       read_lock(&tasklist_lock);
+       p = find_task_by_pid(pid);
+       if (!p) {
+               ret = -ESRCH;
+               goto out_unlock;
+       }
+       if ((!info || ((unsigned long)info != 1 &&
+                       (unsigned long)info != 2 && SI_FROMUSER(info)))
+           && (euid != p->suid) && (euid != p->uid)
+           && (uid != p->suid) && (uid != p->uid)) {
+               ret = -EPERM;
+               goto out_unlock;
+       }
+       if (sig && p->sighand) {
+               unsigned long flags;
+               spin_lock_irqsave(&p->sighand->siglock, flags);
+               ret = __group_send_sig_info(sig, info, p);
+               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+       }
+out_unlock:
+       read_unlock(&tasklist_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
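
Note: kill_proc_info_as_uid() lets a caller deliver a signal using explicit uid/euid credentials instead of current's; for a user-originated siginfo the supplied uid or euid must match the target's uid or saved uid. A rough userspace model of just that test; target_creds and may_signal_as are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

struct target_creds { uid_t uid, suid; };

static bool may_signal_as(uid_t uid, uid_t euid,
                          const struct target_creds *t, bool from_user)
{
        if (!from_user)         /* kernel-generated siginfo bypasses the test */
                return true;
        return euid == t->suid || euid == t->uid ||
               uid  == t->suid || uid  == t->uid;
}

int main(void)
{
        struct target_creds t = { .uid = 1000, .suid = 1000 };

        printf("matching uid: %d, foreign uid: %d\n",
               may_signal_as(1000, 1000, &t, true),    /* 1: allowed */
               may_signal_as(2000, 2000, &t, true));   /* 0: -EPERM  */
        return 0;
}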
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1365,16 +1386,16 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
        unsigned long flags;
        int ret = 0;
 
-       /*
-        * We need the tasklist lock even for the specific
-        * thread case (when we don't need to follow the group
-        * lists) in order to avoid races with "p->sighand"
-        * going away or changing from under us.
-        */
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-       read_lock(&tasklist_lock);  
+       read_lock(&tasklist_lock);
+
+       if (unlikely(p->flags & PF_EXITING)) {
+               ret = -1;
+               goto out_err;
+       }
+
        spin_lock_irqsave(&p->sighand->siglock, flags);
-       
+
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queue just increment
@@ -1384,7 +1405,7 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                        BUG();
                q->info.si_overrun++;
                goto out;
-       } 
+       }
        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig)) {
                ret = 1;
@@ -1399,8 +1420,10 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 
 out:
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
+out_err:
        read_unlock(&tasklist_lock);
-       return(ret);
+
+       return ret;
 }
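
Note: send_sigqueue() now bails out before touching p->sighand when the target is already exiting, and the error path unwinds only the tasklist lock via the new out_err label. The nested-lock early-bail pattern in a generic, runnable form; the locks and the exiting flag here are stand-ins, not kernel objects.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static int exiting = 1;   /* stand-in for the PF_EXITING test */

static int queue_item(void)
{
        int ret = 0;

        pthread_rwlock_rdlock(&list_lock);
        if (exiting) {          /* target is going away: never take obj_lock */
                ret = -1;
                goto out_err;
        }
        pthread_mutex_lock(&obj_lock);
        /* ... queue work here ... */
        pthread_mutex_unlock(&obj_lock);
out_err:
        pthread_rwlock_unlock(&list_lock);   /* undo only what was taken */
        return ret;
}

int main(void)
{
        printf("queue_item: %d\n", queue_item());   /* prints -1 */
        return 0;
}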
 
 int
@@ -1527,14 +1550,20 @@ void do_notify_parent(struct task_struct *tsk, int sig)
        spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-static void
-do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
-                        int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
 {
        struct siginfo info;
        unsigned long flags;
+       struct task_struct *parent;
        struct sighand_struct *sighand;
 
+       if (to_self)
+               parent = tsk->parent;
+       else {
+               tsk = tsk->group_leader;
+               parent = tsk->real_parent;
+       }
+
        info.si_signo = SIGCHLD;
        info.si_errno = 0;
        info.si_pid = tsk->pid;
@@ -1603,8 +1632,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
                   !(current->ptrace & PT_ATTACHED)) &&
            (likely(current->parent->signal != current->signal) ||
             !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
-               do_notify_parent_cldstop(current, current->parent,
-                                        CLD_TRAPPED);
+               do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
@@ -1653,25 +1681,25 @@ void ptrace_notify(int exit_code)
 static void
 finish_stop(int stop_count)
 {
+       int to_self;
+
        /*
         * If there are no other threads in the group, or if there is
         * a group stop in progress and we are the last to stop,
         * report to the parent.  When ptraced, every thread reports itself.
         */
-       if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
-               read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(current, current->parent,
-                                        CLD_STOPPED);
-               read_unlock(&tasklist_lock);
-       }
-       else if (stop_count == 0) {
-               read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(current->group_leader,
-                                        current->group_leader->real_parent,
-                                        CLD_STOPPED);
-               read_unlock(&tasklist_lock);
-       }
+       if (stop_count < 0 || (current->ptrace & PT_PTRACED))
+               to_self = 1;
+       else if (stop_count == 0)
+               to_self = 0;
+       else
+               goto out;
 
+       read_lock(&tasklist_lock);
+       do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
+       read_unlock(&tasklist_lock);
+
+out:
        schedule();
        /*
         * Now we don't run again until continued.
@@ -1758,7 +1786,8 @@ do_signal_stop(int signr)
                                 * stop is always done with the siglock held,
                                 * so this check has no races.
                                 */
-                               if (t->state < TASK_STOPPED) {
+                               if (!t->exit_state &&
+                                   !(t->state & (TASK_STOPPED|TASK_TRACED))) {
                                        stop_count++;
                                        signal_wake_up(t, 0);
                                }
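
Note: the old "t->state < TASK_STOPPED" is an ordering comparison on what are really bit-flag values, and it also counts threads that have already begun exiting, which can skew the group stop count. The new check tests exit_state and the stopped/traced bits explicitly. A small demonstration with invented stand-in constants.

#include <stdio.h>

/* Stand-in state values, loosely modelled on the 2.6-era task states. */
#define S_RUNNING         0x0000
#define S_STOPPED         0x0004
#define S_TRACED          0x0008

struct task_model {
        unsigned state;
        int exit_state;        /* nonzero once the task has begun exiting */
};

/* Old check: an ordering comparison on bit flags. */
static int old_should_count(const struct task_model *t)
{
        return t->state < S_STOPPED;
}

/* New check: exclude exiting tasks and stopped/traced tasks explicitly. */
static int new_should_count(const struct task_model *t)
{
        return !t->exit_state && !(t->state & (S_STOPPED | S_TRACED));
}

int main(void)
{
        /* A thread that has already started exiting: state still looks "running". */
        struct task_model exiting = { .state = S_RUNNING, .exit_state = 1 };

        printf("old counts exiting thread: %d\n", old_should_count(&exiting));  /* 1 */
        printf("new counts exiting thread: %d\n", new_should_count(&exiting));  /* 0 */
        return 0;
}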
@@ -2213,11 +2242,9 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);
 
-                       current->state = TASK_INTERRUPTIBLE;
-                       timeout = schedule_timeout(timeout);
+                       timeout = schedule_timeout_interruptible(timeout);
 
-                       if (current->flags & PF_FREEZE)
-                               refrigerator(PF_FREEZE);
+                       try_to_freeze();
                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &these, &info);
                        current->blocked = current->real_blocked;
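
Note: the final hunk swaps the open-coded set-state-and-sleep for schedule_timeout_interruptible() and the PF_FREEZE/refrigerator() pair for try_to_freeze(); behaviour is unchanged. For reference, the userspace face of the syscall this function implements, unrelated to those helper swaps and shown only as a usage reminder.

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);   /* must block what we wait for */

        raise(SIGUSR1);                        /* make one signal pending */
        int sig = sigtimedwait(&set, &info, &ts);
        printf("got signal %d (SIGUSR1 is %d)\n", sig, SIGUSR1);
        return 0;
}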