job control: Add @for_ptrace to do_notify_parent_cldstop()
[linux-2.6.git] / kernel / signal.c
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *              Changes to use preallocated sigqueue structures
10  *              to allow signals to be sent reliably.
11  */
12
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/signal.h>
33
34 #include <asm/param.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/siginfo.h>
38 #include "audit.h"      /* audit_signal_info() */
39
40 /*
41  * SLAB caches for signal bits.
42  */
43
44 static struct kmem_cache *sigqueue_cachep;
45
46 int print_fatal_signals __read_mostly;
47
48 static void __user *sig_handler(struct task_struct *t, int sig)
49 {
50         return t->sighand->action[sig - 1].sa.sa_handler;
51 }
52
53 static int sig_handler_ignored(void __user *handler, int sig)
54 {
55         /* Is it explicitly or implicitly ignored? */
56         return handler == SIG_IGN ||
57                 (handler == SIG_DFL && sig_kernel_ignore(sig));
58 }
59
60 static int sig_task_ignored(struct task_struct *t, int sig,
61                 int from_ancestor_ns)
62 {
63         void __user *handler;
64
65         handler = sig_handler(t, sig);
66
67         if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68                         handler == SIG_DFL && !from_ancestor_ns)
69                 return 1;
70
71         return sig_handler_ignored(handler, sig);
72 }
73
74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75 {
76         /*
77          * Blocked signals are never ignored, since the
78          * signal handler may change by the time it is
79          * unblocked.
80          */
81         if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82                 return 0;
83
84         if (!sig_task_ignored(t, sig, from_ancestor_ns))
85                 return 0;
86
87         /*
88          * Tracers may want to know about even ignored signals.
89          */
90         return !tracehook_consider_ignored_signal(t, sig);
91 }
92
93 /*
94  * Re-calculate pending state from the set of locally pending
95  * signals, globally pending signals, and blocked signals.
96  */
97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98 {
99         unsigned long ready;
100         long i;
101
102         switch (_NSIG_WORDS) {
103         default:
104                 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105                         ready |= signal->sig[i] &~ blocked->sig[i];
106                 break;
107
108         case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109                 ready |= signal->sig[2] &~ blocked->sig[2];
110                 ready |= signal->sig[1] &~ blocked->sig[1];
111                 ready |= signal->sig[0] &~ blocked->sig[0];
112                 break;
113
114         case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115                 ready |= signal->sig[0] &~ blocked->sig[0];
116                 break;
117
118         case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119         }
120         return ready != 0;
121 }
122
123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124
125 static int recalc_sigpending_tsk(struct task_struct *t)
126 {
127         if ((t->group_stop & GROUP_STOP_PENDING) ||
128             PENDING(&t->pending, &t->blocked) ||
129             PENDING(&t->signal->shared_pending, &t->blocked)) {
130                 set_tsk_thread_flag(t, TIF_SIGPENDING);
131                 return 1;
132         }
133         /*
134          * We must never clear the flag in another thread, or in current
135          * when it's possible the current syscall is returning -ERESTART*.
136          * So we don't clear it here; only callers that know it is safe clear it themselves.
137          */
138         return 0;
139 }
140
141 /*
142  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143  * This is superfluous when called on current; the wakeup is a harmless no-op.
144  */
145 void recalc_sigpending_and_wake(struct task_struct *t)
146 {
147         if (recalc_sigpending_tsk(t))
148                 signal_wake_up(t, 0);
149 }
150
151 void recalc_sigpending(void)
152 {
153         if (unlikely(tracehook_force_sigpending()))
154                 set_thread_flag(TIF_SIGPENDING);
155         else if (!recalc_sigpending_tsk(current) && !freezing(current))
156                 clear_thread_flag(TIF_SIGPENDING);
157
158 }
159
160 /* Given the mask, find the first available signal that should be serviced. */
161
162 #define SYNCHRONOUS_MASK \
163         (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164          sigmask(SIGTRAP) | sigmask(SIGFPE))
165
166 int next_signal(struct sigpending *pending, sigset_t *mask)
167 {
168         unsigned long i, *s, *m, x;
169         int sig = 0;
170
171         s = pending->signal.sig;
172         m = mask->sig;
173
174         /*
175          * Handle the first word specially: it contains the
176          * synchronous signals that need to be dequeued first.
177          */
178         x = *s &~ *m;
179         if (x) {
180                 if (x & SYNCHRONOUS_MASK)
181                         x &= SYNCHRONOUS_MASK;
182                 sig = ffz(~x) + 1;
183                 return sig;
184         }
185
186         switch (_NSIG_WORDS) {
187         default:
188                 for (i = 1; i < _NSIG_WORDS; ++i) {
189                         x = *++s &~ *++m;
190                         if (!x)
191                                 continue;
192                         sig = ffz(~x) + i*_NSIG_BPW + 1;
193                         break;
194                 }
195                 break;
196
197         case 2:
198                 x = s[1] &~ m[1];
199                 if (!x)
200                         break;
201                 sig = ffz(~x) + _NSIG_BPW + 1;
202                 break;
203
204         case 1:
205                 /* Nothing to do */
206                 break;
207         }
208
209         return sig;
210 }
211
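/*
 * Editorial example (not part of the original file): with x86 signal
 * numbering, if SIGUSR1 (10) and SIGSEGV (11) are both pending and
 * unblocked, the SYNCHRONOUS_MASK filtering in next_signal() above makes
 * it return SIGSEGV first, even though SIGUSR1 has the lower number.
 */
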
212 static inline void print_dropped_signal(int sig)
213 {
214         static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215
216         if (!print_fatal_signals)
217                 return;
218
219         if (!__ratelimit(&ratelimit_state))
220                 return;
221
222         printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223                                 current->comm, current->pid, sig);
224 }
225
226 /**
227  * task_clear_group_stop_trapping - clear group stop trapping bit
228  * @task: target task
229  *
230  * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
231  * and wake up the ptracer.  Note that we don't need any further locking.
232  * @task->siglock guarantees that @task->parent points to the ptracer.
233  *
234  * CONTEXT:
235  * Must be called with @task->sighand->siglock held.
236  */
237 static void task_clear_group_stop_trapping(struct task_struct *task)
238 {
239         if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
240                 task->group_stop &= ~GROUP_STOP_TRAPPING;
241                 __wake_up_sync(&task->parent->signal->wait_chldexit,
242                                TASK_UNINTERRUPTIBLE, 1);
243         }
244 }
245
246 /**
247  * task_clear_group_stop_pending - clear pending group stop
248  * @task: target task
249  *
250  * Clear group stop states for @task.
251  *
252  * CONTEXT:
253  * Must be called with @task->sighand->siglock held.
254  */
255 void task_clear_group_stop_pending(struct task_struct *task)
256 {
257         task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
258 }
259
260 /**
261  * task_participate_group_stop - participate in a group stop
262  * @task: task participating in a group stop
263  *
264  * @task has GROUP_STOP_PENDING set and is participating in a group stop.
265  * Group stop states are cleared and the group stop count is consumed if
266  * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
267  * stop, the appropriate %SIGNAL_* flags are set.
268  *
269  * CONTEXT:
270  * Must be called with @task->sighand->siglock held.
271  */
272 static bool task_participate_group_stop(struct task_struct *task)
273 {
274         struct signal_struct *sig = task->signal;
275         bool consume = task->group_stop & GROUP_STOP_CONSUME;
276
277         WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
278
279         task_clear_group_stop_pending(task);
280
281         if (!consume)
282                 return false;
283
284         if (!WARN_ON_ONCE(sig->group_stop_count == 0))
285                 sig->group_stop_count--;
286
287         if (!sig->group_stop_count) {
288                 sig->flags = SIGNAL_STOP_STOPPED;
289                 return true;
290         }
291         return false;
292 }
293
294 /*
295  * allocate a new signal queue record
296  * - this may be called without locks if and only if t == current, otherwise an
297  *   appropriate lock must be held to stop the target task from exiting
298  */
299 static struct sigqueue *
300 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
301 {
302         struct sigqueue *q = NULL;
303         struct user_struct *user;
304
305         /*
306          * Protect access to @t credentials. This can go away when all
307          * callers hold rcu read lock.
308          */
309         rcu_read_lock();
310         user = get_uid(__task_cred(t)->user);
311         atomic_inc(&user->sigpending);
312         rcu_read_unlock();
313
314         if (override_rlimit ||
315             atomic_read(&user->sigpending) <=
316                         task_rlimit(t, RLIMIT_SIGPENDING)) {
317                 q = kmem_cache_alloc(sigqueue_cachep, flags);
318         } else {
319                 print_dropped_signal(sig);
320         }
321
322         if (unlikely(q == NULL)) {
323                 atomic_dec(&user->sigpending);
324                 free_uid(user);
325         } else {
326                 INIT_LIST_HEAD(&q->list);
327                 q->flags = 0;
328                 q->user = user;
329         }
330
331         return q;
332 }
333
334 static void __sigqueue_free(struct sigqueue *q)
335 {
336         if (q->flags & SIGQUEUE_PREALLOC)
337                 return;
338         atomic_dec(&q->user->sigpending);
339         free_uid(q->user);
340         kmem_cache_free(sigqueue_cachep, q);
341 }
342
343 void flush_sigqueue(struct sigpending *queue)
344 {
345         struct sigqueue *q;
346
347         sigemptyset(&queue->signal);
348         while (!list_empty(&queue->list)) {
349                 q = list_entry(queue->list.next, struct sigqueue , list);
350                 list_del_init(&q->list);
351                 __sigqueue_free(q);
352         }
353 }
354
355 /*
356  * Flush all pending signals for a task.
357  */
358 void __flush_signals(struct task_struct *t)
359 {
360         clear_tsk_thread_flag(t, TIF_SIGPENDING);
361         flush_sigqueue(&t->pending);
362         flush_sigqueue(&t->signal->shared_pending);
363 }
364
365 void flush_signals(struct task_struct *t)
366 {
367         unsigned long flags;
368
369         spin_lock_irqsave(&t->sighand->siglock, flags);
370         __flush_signals(t);
371         spin_unlock_irqrestore(&t->sighand->siglock, flags);
372 }
373
374 static void __flush_itimer_signals(struct sigpending *pending)
375 {
376         sigset_t signal, retain;
377         struct sigqueue *q, *n;
378
379         signal = pending->signal;
380         sigemptyset(&retain);
381
382         list_for_each_entry_safe(q, n, &pending->list, list) {
383                 int sig = q->info.si_signo;
384
385                 if (likely(q->info.si_code != SI_TIMER)) {
386                         sigaddset(&retain, sig);
387                 } else {
388                         sigdelset(&signal, sig);
389                         list_del_init(&q->list);
390                         __sigqueue_free(q);
391                 }
392         }
393
394         sigorsets(&pending->signal, &signal, &retain);
395 }
396
397 void flush_itimer_signals(void)
398 {
399         struct task_struct *tsk = current;
400         unsigned long flags;
401
402         spin_lock_irqsave(&tsk->sighand->siglock, flags);
403         __flush_itimer_signals(&tsk->pending);
404         __flush_itimer_signals(&tsk->signal->shared_pending);
405         spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
406 }
407
408 void ignore_signals(struct task_struct *t)
409 {
410         int i;
411
412         for (i = 0; i < _NSIG; ++i)
413                 t->sighand->action[i].sa.sa_handler = SIG_IGN;
414
415         flush_signals(t);
416 }
417
418 /*
419  * Flush all handlers for a task.
420  */
421
422 void
423 flush_signal_handlers(struct task_struct *t, int force_default)
424 {
425         int i;
426         struct k_sigaction *ka = &t->sighand->action[0];
427         for (i = _NSIG ; i != 0 ; i--) {
428                 if (force_default || ka->sa.sa_handler != SIG_IGN)
429                         ka->sa.sa_handler = SIG_DFL;
430                 ka->sa.sa_flags = 0;
431                 sigemptyset(&ka->sa.sa_mask);
432                 ka++;
433         }
434 }
435
436 int unhandled_signal(struct task_struct *tsk, int sig)
437 {
438         void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
439         if (is_global_init(tsk))
440                 return 1;
441         if (handler != SIG_IGN && handler != SIG_DFL)
442                 return 0;
443         return !tracehook_consider_fatal_signal(tsk, sig);
444 }
445
446
447 /* Notify the system that a driver wants to block all signals for this
448  * process, and wants to be notified if any signals at all were to be
449  * sent/acted upon.  If the notifier routine returns non-zero, then the
450  * signal will be acted upon after all.  If the notifier routine returns 0,
451  * then the signal will be blocked.  Only one block per process is
452  * allowed.  priv is a pointer to private data that the notifier routine
453  * can use to determine if the signal should be blocked or not.  */
454
455 void
456 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
457 {
458         unsigned long flags;
459
460         spin_lock_irqsave(&current->sighand->siglock, flags);
461         current->notifier_mask = mask;
462         current->notifier_data = priv;
463         current->notifier = notifier;
464         spin_unlock_irqrestore(&current->sighand->siglock, flags);
465 }
466
467 /* Notify the system that blocking has ended. */
468
469 void
470 unblock_all_signals(void)
471 {
472         unsigned long flags;
473
474         spin_lock_irqsave(&current->sighand->siglock, flags);
475         current->notifier = NULL;
476         current->notifier_data = NULL;
477         recalc_sigpending();
478         spin_unlock_irqrestore(&current->sighand->siglock, flags);
479 }
480
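/*
 * Editorial illustration (not part of the original file): a rough sketch of
 * how a driver might use the two notifier hooks above.  "struct my_dev",
 * its field and the my_dev_* helpers are hypothetical placeholders; only
 * block_all_signals()/unblock_all_signals() are the real interfaces.
 */
struct my_dev {
        int in_critical_window;
};

static int my_dev_signal_notifier(void *priv)
{
        struct my_dev *dev = priv;

        /* Returning 0 holds the signal back; non-zero lets it be acted upon. */
        return !dev->in_critical_window;
}

static void my_dev_enter_critical(struct my_dev *dev)
{
        sigset_t mask;

        sigfillset(&mask);                      /* consult the notifier for every signal ... */
        sigdelsetmask(&mask, sigmask(SIGKILL)); /* ... except SIGKILL (illustrative choice)  */

        dev->in_critical_window = 1;
        block_all_signals(my_dev_signal_notifier, dev, &mask);
}

static void my_dev_leave_critical(struct my_dev *dev)
{
        dev->in_critical_window = 0;
        unblock_all_signals();
}
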
481 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
482 {
483         struct sigqueue *q, *first = NULL;
484
485         /*
486          * Collect the siginfo appropriate to this signal.  Check if
487          * there is another siginfo for the same signal.
488         */
489         list_for_each_entry(q, &list->list, list) {
490                 if (q->info.si_signo == sig) {
491                         if (first)
492                                 goto still_pending;
493                         first = q;
494                 }
495         }
496
497         sigdelset(&list->signal, sig);
498
499         if (first) {
500 still_pending:
501                 list_del_init(&first->list);
502                 copy_siginfo(info, &first->info);
503                 __sigqueue_free(first);
504         } else {
505                 /* Ok, it wasn't in the queue.  This must be
506                    a fast-pathed signal or we must have been
507                    out of queue space.  So zero out the info.
508                  */
509                 info->si_signo = sig;
510                 info->si_errno = 0;
511                 info->si_code = SI_USER;
512                 info->si_pid = 0;
513                 info->si_uid = 0;
514         }
515 }
516
517 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
518                         siginfo_t *info)
519 {
520         int sig = next_signal(pending, mask);
521
522         if (sig) {
523                 if (current->notifier) {
524                         if (sigismember(current->notifier_mask, sig)) {
525                                 if (!(current->notifier)(current->notifier_data)) {
526                                         clear_thread_flag(TIF_SIGPENDING);
527                                         return 0;
528                                 }
529                         }
530                 }
531
532                 collect_signal(sig, pending, info);
533         }
534
535         return sig;
536 }
537
538 /*
539  * Dequeue a signal and return the element to the caller, which is 
540  * expected to free it.
541  *
542  * All callers have to hold the siglock.
543  */
544 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
545 {
546         int signr;
547
548         /* We only dequeue private signals from ourselves, we don't let
549          * signalfd steal them
550          */
551         signr = __dequeue_signal(&tsk->pending, mask, info);
552         if (!signr) {
553                 signr = __dequeue_signal(&tsk->signal->shared_pending,
554                                          mask, info);
555                 /*
556                  * itimer signal ?
557                  *
558                  * itimers are process shared and we restart periodic
559                  * itimers in the signal delivery path to prevent DoS
560                  * attacks in the high resolution timer case. This is
561                  * compliant with the old way of self restarting
562                  * itimers, as the SIGALRM is a legacy signal and only
563                  * queued once. Changing the restart behaviour to
564                  * restart the timer in the signal dequeue path is
565                  * reducing the timer noise on heavy loaded !highres
566                  * systems too.
567                  */
568                 if (unlikely(signr == SIGALRM)) {
569                         struct hrtimer *tmr = &tsk->signal->real_timer;
570
571                         if (!hrtimer_is_queued(tmr) &&
572                             tsk->signal->it_real_incr.tv64 != 0) {
573                                 hrtimer_forward(tmr, tmr->base->get_time(),
574                                                 tsk->signal->it_real_incr);
575                                 hrtimer_restart(tmr);
576                         }
577                 }
578         }
579
580         recalc_sigpending();
581         if (!signr)
582                 return 0;
583
584         if (unlikely(sig_kernel_stop(signr))) {
585                 /*
586                  * Set a marker that we have dequeued a stop signal.  Our
587                  * caller might release the siglock and then the pending
588                  * stop signal it is about to process is no longer in the
589                  * pending bitmasks, but must still be cleared by a SIGCONT
590                  * (and overruled by a SIGKILL).  So those cases clear this
591                  * shared flag after we've set it.  Note that this flag may
592                  * remain set after the signal we return is ignored or
593                  * handled.  That doesn't matter because its only purpose
594                  * is to alert stop-signal processing code when another
595                  * processor has come along and cleared the flag.
596                  */
597                 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
598         }
599         if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
600                 /*
601                  * Release the siglock to ensure proper locking order
602                  * of timer locks outside of siglocks.  Note, we leave
603                  * irqs disabled here, since the posix-timers code is
604                  * about to disable them again anyway.
605                  */
606                 spin_unlock(&tsk->sighand->siglock);
607                 do_schedule_next_timer(info);
608                 spin_lock(&tsk->sighand->siglock);
609         }
610         return signr;
611 }
612
613 /*
614  * Tell a process that it has a new active signal.
615  *
616  * NOTE! we rely on the previous spin_lock to
617  * lock interrupts for us! We can only be called with
618  * "siglock" held, and the local interrupt must
619  * have been disabled when that got acquired!
620  *
621  * No need to set need_resched since signal event passing
622  * goes through ->blocked
623  */
624 void signal_wake_up(struct task_struct *t, int resume)
625 {
626         unsigned int mask;
627
628         set_tsk_thread_flag(t, TIF_SIGPENDING);
629
630         /*
631          * For SIGKILL, we want to wake it up in the stopped/traced/killable
632          * case. We don't check t->state here because there is a race with it
633          * executing on another processor and just now entering stopped state.
634          * By using wake_up_state, we ensure the process will wake up and
635          * handle its death signal.
636          */
637         mask = TASK_INTERRUPTIBLE;
638         if (resume)
639                 mask |= TASK_WAKEKILL;
640         if (!wake_up_state(t, mask))
641                 kick_process(t);
642 }
643
644 /*
645  * Remove signals in mask from the pending set and queue.
646  * Returns 1 if any signals were found.
647  *
648  * All callers must be holding the siglock.
649  *
650  * This version takes a sigset mask and looks at all signals,
651  * not just those in the first mask word.
652  */
653 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
654 {
655         struct sigqueue *q, *n;
656         sigset_t m;
657
658         sigandsets(&m, mask, &s->signal);
659         if (sigisemptyset(&m))
660                 return 0;
661
662         signandsets(&s->signal, &s->signal, mask);
663         list_for_each_entry_safe(q, n, &s->list, list) {
664                 if (sigismember(mask, q->info.si_signo)) {
665                         list_del_init(&q->list);
666                         __sigqueue_free(q);
667                 }
668         }
669         return 1;
670 }
671 /*
672  * Remove signals in mask from the pending set and queue.
673  * Returns 1 if any signals were found.
674  *
675  * All callers must be holding the siglock.
676  */
677 static int rm_from_queue(unsigned long mask, struct sigpending *s)
678 {
679         struct sigqueue *q, *n;
680
681         if (!sigtestsetmask(&s->signal, mask))
682                 return 0;
683
684         sigdelsetmask(&s->signal, mask);
685         list_for_each_entry_safe(q, n, &s->list, list) {
686                 if (q->info.si_signo < SIGRTMIN &&
687                     (mask & sigmask(q->info.si_signo))) {
688                         list_del_init(&q->list);
689                         __sigqueue_free(q);
690                 }
691         }
692         return 1;
693 }
694
695 static inline int is_si_special(const struct siginfo *info)
696 {
697         return info <= SEND_SIG_FORCED;
698 }
699
700 static inline bool si_fromuser(const struct siginfo *info)
701 {
702         return info == SEND_SIG_NOINFO ||
703                 (!is_si_special(info) && SI_FROMUSER(info));
704 }
705
706 /*
707  * Bad permissions for sending the signal
708  * - the caller must hold the RCU read lock
709  */
710 static int check_kill_permission(int sig, struct siginfo *info,
711                                  struct task_struct *t)
712 {
713         const struct cred *cred, *tcred;
714         struct pid *sid;
715         int error;
716
717         if (!valid_signal(sig))
718                 return -EINVAL;
719
720         if (!si_fromuser(info))
721                 return 0;
722
723         error = audit_signal_info(sig, t); /* Let audit system see the signal */
724         if (error)
725                 return error;
726
727         cred = current_cred();
728         tcred = __task_cred(t);
729         if (!same_thread_group(current, t) &&
730             (cred->euid ^ tcred->suid) &&
731             (cred->euid ^ tcred->uid) &&
732             (cred->uid  ^ tcred->suid) &&
733             (cred->uid  ^ tcred->uid) &&
734             !capable(CAP_KILL)) {
735                 switch (sig) {
736                 case SIGCONT:
737                         sid = task_session(t);
738                         /*
739                          * We don't return the error if sid == NULL. The
740                          * task was unhashed, the caller must notice this.
741                          */
742                         if (!sid || sid == task_session(current))
743                                 break;
744                 default:
745                         return -EPERM;
746                 }
747         }
748
749         return security_task_kill(t, info, sig, 0);
750 }
751
752 /*
753  * Handle magic process-wide effects of stop/continue signals. Unlike
754  * the signal actions, these happen immediately at signal-generation
755  * time regardless of blocking, ignoring, or handling.  This does the
756  * actual continuing for SIGCONT, but not the actual stopping for stop
757  * signals. The process stop is done as a signal action for SIG_DFL.
758  *
759  * Returns true if the signal should be actually delivered, otherwise
760  * it should be dropped.
761  */
762 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
763 {
764         struct signal_struct *signal = p->signal;
765         struct task_struct *t;
766
767         if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
768                 /*
769                  * The process is in the middle of dying, nothing to do.
770                  */
771         } else if (sig_kernel_stop(sig)) {
772                 /*
773                  * This is a stop signal.  Remove SIGCONT from all queues.
774                  */
775                 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
776                 t = p;
777                 do {
778                         rm_from_queue(sigmask(SIGCONT), &t->pending);
779                 } while_each_thread(p, t);
780         } else if (sig == SIGCONT) {
781                 unsigned int why;
782                 /*
783                  * Remove all stop signals from all queues,
784                  * and wake all threads.
785                  */
786                 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
787                 t = p;
788                 do {
789                         unsigned int state;
790
791                         task_clear_group_stop_pending(t);
792
793                         rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
794                         /*
795                          * If there is a handler for SIGCONT, we must make
796                          * sure that no thread returns to user mode before
797                          * we post the signal, in case it was the only
798                          * thread eligible to run the signal handler--then
799                          * it must not do anything between resuming and
800                          * running the handler.  With the TIF_SIGPENDING
801                          * flag set, the thread will pause and acquire the
802                          * siglock that we hold now and until we've queued
803                          * the pending signal.
804                          *
805                          * Wake up the stopped thread _after_ setting
806                          * TIF_SIGPENDING
807                          */
808                         state = __TASK_STOPPED;
809                         if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
810                                 set_tsk_thread_flag(t, TIF_SIGPENDING);
811                                 state |= TASK_INTERRUPTIBLE;
812                         }
813                         wake_up_state(t, state);
814                 } while_each_thread(p, t);
815
816                 /*
817                  * Notify the parent with CLD_CONTINUED if we were stopped.
818                  *
819                  * If we were in the middle of a group stop, we pretend it
820                  * was already finished, and then continued. Since SIGCHLD
821                  * doesn't queue we report only CLD_STOPPED, as if the next
822                  * CLD_CONTINUED was dropped.
823                  */
824                 why = 0;
825                 if (signal->flags & SIGNAL_STOP_STOPPED)
826                         why |= SIGNAL_CLD_CONTINUED;
827                 else if (signal->group_stop_count)
828                         why |= SIGNAL_CLD_STOPPED;
829
830                 if (why) {
831                         /*
832                          * The first thread which returns from do_signal_stop()
833                          * will take ->siglock, notice SIGNAL_CLD_MASK, and
834                          * notify its parent. See get_signal_to_deliver().
835                          */
836                         signal->flags = why | SIGNAL_STOP_CONTINUED;
837                         signal->group_stop_count = 0;
838                         signal->group_exit_code = 0;
839                 } else {
840                         /*
841                          * We are not stopped, but there could be a stop
842                          * signal in the middle of being processed after
843                          * being removed from the queue.  Clear that too.
844                          */
845                         signal->flags &= ~SIGNAL_STOP_DEQUEUED;
846                 }
847         }
848
849         return !sig_ignored(p, sig, from_ancestor_ns);
850 }
851
852 /*
853  * Test if P wants to take SIG.  After we've checked all threads with this,
854  * it's equivalent to finding no threads not blocking SIG.  Any threads not
855  * blocking SIG were ruled out because they are not running and already
856  * have pending signals.  Such threads will dequeue from the shared queue
857  * as soon as they're available, so putting the signal on the shared queue
858  * will be equivalent to sending it to one such thread.
859  */
860 static inline int wants_signal(int sig, struct task_struct *p)
861 {
862         if (sigismember(&p->blocked, sig))
863                 return 0;
864         if (p->flags & PF_EXITING)
865                 return 0;
866         if (sig == SIGKILL)
867                 return 1;
868         if (task_is_stopped_or_traced(p))
869                 return 0;
870         return task_curr(p) || !signal_pending(p);
871 }
872
873 static void complete_signal(int sig, struct task_struct *p, int group)
874 {
875         struct signal_struct *signal = p->signal;
876         struct task_struct *t;
877
878         /*
879          * Now find a thread we can wake up to take the signal off the queue.
880          *
881          * If the main thread wants the signal, it gets first crack.
882          * Probably the least surprising to the average bear.
883          */
884         if (wants_signal(sig, p))
885                 t = p;
886         else if (!group || thread_group_empty(p))
887                 /*
888                  * There is just one thread and it does not need to be woken.
889                  * It will dequeue unblocked signals before it runs again.
890                  */
891                 return;
892         else {
893                 /*
894                  * Otherwise try to find a suitable thread.
895                  */
896                 t = signal->curr_target;
897                 while (!wants_signal(sig, t)) {
898                         t = next_thread(t);
899                         if (t == signal->curr_target)
900                                 /*
901                                  * No thread needs to be woken.
902                                  * Any eligible threads will see
903                                  * the signal in the queue soon.
904                                  */
905                                 return;
906                 }
907                 signal->curr_target = t;
908         }
909
910         /*
911          * Found a killable thread.  If the signal will be fatal,
912          * then start taking the whole group down immediately.
913          */
914         if (sig_fatal(p, sig) &&
915             !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
916             !sigismember(&t->real_blocked, sig) &&
917             (sig == SIGKILL ||
918              !tracehook_consider_fatal_signal(t, sig))) {
919                 /*
920                  * This signal will be fatal to the whole group.
921                  */
922                 if (!sig_kernel_coredump(sig)) {
923                         /*
924                          * Start a group exit and wake everybody up.
925                          * This way we don't have other threads
926                          * running and doing things after a slower
927                          * thread has the fatal signal pending.
928                          */
929                         signal->flags = SIGNAL_GROUP_EXIT;
930                         signal->group_exit_code = sig;
931                         signal->group_stop_count = 0;
932                         t = p;
933                         do {
934                                 task_clear_group_stop_pending(t);
935                                 sigaddset(&t->pending.signal, SIGKILL);
936                                 signal_wake_up(t, 1);
937                         } while_each_thread(p, t);
938                         return;
939                 }
940         }
941
942         /*
943          * The signal is already in the shared-pending queue.
944          * Tell the chosen thread to wake up and dequeue it.
945          */
946         signal_wake_up(t, sig == SIGKILL);
947         return;
948 }
949
950 static inline int legacy_queue(struct sigpending *signals, int sig)
951 {
952         return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
953 }
954
955 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
956                         int group, int from_ancestor_ns)
957 {
958         struct sigpending *pending;
959         struct sigqueue *q;
960         int override_rlimit;
961
962         trace_signal_generate(sig, info, t);
963
964         assert_spin_locked(&t->sighand->siglock);
965
966         if (!prepare_signal(sig, t, from_ancestor_ns))
967                 return 0;
968
969         pending = group ? &t->signal->shared_pending : &t->pending;
970         /*
971          * Short-circuit ignored signals and support queuing
972          * exactly one non-rt signal, so that we can get more
973          * detailed information about the cause of the signal.
974          */
975         if (legacy_queue(pending, sig))
976                 return 0;
977         /*
978          * fast-pathed signals for kernel-internal things like SIGSTOP
979          * or SIGKILL.
980          */
981         if (info == SEND_SIG_FORCED)
982                 goto out_set;
983
984         /* Real-time signals must be queued if sent by sigqueue, or
985            some other real-time mechanism.  It is implementation
986            defined whether kill() does so.  We attempt to do so, on
987            the principle of least surprise, but since kill is not
988            allowed to fail with EAGAIN when low on memory we just
989            make sure at least one signal gets delivered and don't
990            pass on the info struct.  */
991
992         if (sig < SIGRTMIN)
993                 override_rlimit = (is_si_special(info) || info->si_code >= 0);
994         else
995                 override_rlimit = 0;
996
997         q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
998                 override_rlimit);
999         if (q) {
1000                 list_add_tail(&q->list, &pending->list);
1001                 switch ((unsigned long) info) {
1002                 case (unsigned long) SEND_SIG_NOINFO:
1003                         q->info.si_signo = sig;
1004                         q->info.si_errno = 0;
1005                         q->info.si_code = SI_USER;
1006                         q->info.si_pid = task_tgid_nr_ns(current,
1007                                                         task_active_pid_ns(t));
1008                         q->info.si_uid = current_uid();
1009                         break;
1010                 case (unsigned long) SEND_SIG_PRIV:
1011                         q->info.si_signo = sig;
1012                         q->info.si_errno = 0;
1013                         q->info.si_code = SI_KERNEL;
1014                         q->info.si_pid = 0;
1015                         q->info.si_uid = 0;
1016                         break;
1017                 default:
1018                         copy_siginfo(&q->info, info);
1019                         if (from_ancestor_ns)
1020                                 q->info.si_pid = 0;
1021                         break;
1022                 }
1023         } else if (!is_si_special(info)) {
1024                 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1025                         /*
1026                          * Queue overflow, abort.  We may abort if the
1027                          * signal was rt and sent by user using something
1028                          * other than kill().
1029                          */
1030                         trace_signal_overflow_fail(sig, group, info);
1031                         return -EAGAIN;
1032                 } else {
1033                         /*
1034                          * This is a silent loss of information.  We still
1035                          * send the signal, but the *info bits are lost.
1036                          */
1037                         trace_signal_lose_info(sig, group, info);
1038                 }
1039         }
1040
1041 out_set:
1042         signalfd_notify(t, sig);
1043         sigaddset(&pending->signal, sig);
1044         complete_signal(sig, t, group);
1045         return 0;
1046 }
1047
1048 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1049                         int group)
1050 {
1051         int from_ancestor_ns = 0;
1052
1053 #ifdef CONFIG_PID_NS
1054         from_ancestor_ns = si_fromuser(info) &&
1055                            !task_pid_nr_ns(current, task_active_pid_ns(t));
1056 #endif
1057
1058         return __send_signal(sig, info, t, group, from_ancestor_ns);
1059 }
1060
1061 static void print_fatal_signal(struct pt_regs *regs, int signr)
1062 {
1063         printk("%s/%d: potentially unexpected fatal signal %d.\n",
1064                 current->comm, task_pid_nr(current), signr);
1065
1066 #if defined(__i386__) && !defined(__arch_um__)
1067         printk("code at %08lx: ", regs->ip);
1068         {
1069                 int i;
1070                 for (i = 0; i < 16; i++) {
1071                         unsigned char insn;
1072
1073                         if (get_user(insn, (unsigned char *)(regs->ip + i)))
1074                                 break;
1075                         printk("%02x ", insn);
1076                 }
1077         }
1078 #endif
1079         printk("\n");
1080         preempt_disable();
1081         show_regs(regs);
1082         preempt_enable();
1083 }
1084
1085 static int __init setup_print_fatal_signals(char *str)
1086 {
1087         get_option (&str, &print_fatal_signals);
1088
1089         return 1;
1090 }
1091
1092 __setup("print-fatal-signals=", setup_print_fatal_signals);
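/*
 * Editorial usage note: print_fatal_signals defaults to off.  It can be
 * turned on from the kernel command line, e.g.
 *
 *      print-fatal-signals=1
 *
 * and, assuming the usual sysctl wiring in kernel/sysctl.c, at run time via
 *
 *      echo 1 > /proc/sys/kernel/print-fatal-signals
 */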
1093
1094 int
1095 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1096 {
1097         return send_signal(sig, info, p, 1);
1098 }
1099
1100 static int
1101 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1102 {
1103         return send_signal(sig, info, t, 0);
1104 }
1105
1106 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1107                         bool group)
1108 {
1109         unsigned long flags;
1110         int ret = -ESRCH;
1111
1112         if (lock_task_sighand(p, &flags)) {
1113                 ret = send_signal(sig, info, p, group);
1114                 unlock_task_sighand(p, &flags);
1115         }
1116
1117         return ret;
1118 }
1119
1120 /*
1121  * Force a signal that the process can't ignore: if necessary
1122  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1123  *
1124  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1125  * since we do not want to have a signal handler that was blocked
1126  * be invoked when user space had explicitly blocked it.
1127  *
1128  * We don't want to have recursive SIGSEGV's etc, for example,
1129  * that is why we also clear SIGNAL_UNKILLABLE.
1130  */
1131 int
1132 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1133 {
1134         unsigned long int flags;
1135         int ret, blocked, ignored;
1136         struct k_sigaction *action;
1137
1138         spin_lock_irqsave(&t->sighand->siglock, flags);
1139         action = &t->sighand->action[sig-1];
1140         ignored = action->sa.sa_handler == SIG_IGN;
1141         blocked = sigismember(&t->blocked, sig);
1142         if (blocked || ignored) {
1143                 action->sa.sa_handler = SIG_DFL;
1144                 if (blocked) {
1145                         sigdelset(&t->blocked, sig);
1146                         recalc_sigpending_and_wake(t);
1147                 }
1148         }
1149         if (action->sa.sa_handler == SIG_DFL)
1150                 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1151         ret = specific_send_sig_info(sig, info, t);
1152         spin_unlock_irqrestore(&t->sighand->siglock, flags);
1153
1154         return ret;
1155 }
1156
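/*
 * Editorial illustration (not part of the original file): the typical
 * caller of force_sig_info() is an architecture fault handler forcing a
 * signal the task cannot block or ignore.  A sketch; the my_arch_* name
 * and the address/tsk parameters are assumed to come from surrounding
 * fault-handling code.
 */
static void my_arch_send_segv(struct task_struct *tsk, unsigned long address)
{
        struct siginfo info;

        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        info.si_code  = SEGV_MAPERR;
        info.si_addr  = (void __user *)address;

        force_sig_info(SIGSEGV, &info, tsk);
}
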
1157 /*
1158  * Nuke all other threads in the group.
1159  */
1160 int zap_other_threads(struct task_struct *p)
1161 {
1162         struct task_struct *t = p;
1163         int count = 0;
1164
1165         p->signal->group_stop_count = 0;
1166
1167         while_each_thread(p, t) {
1168                 task_clear_group_stop_pending(t);
1169                 count++;
1170
1171                 /* Don't bother with already dead threads */
1172                 if (t->exit_state)
1173                         continue;
1174                 sigaddset(&t->pending.signal, SIGKILL);
1175                 signal_wake_up(t, 1);
1176         }
1177
1178         return count;
1179 }
1180
1181 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1182                                            unsigned long *flags)
1183 {
1184         struct sighand_struct *sighand;
1185
1186         rcu_read_lock();
1187         for (;;) {
1188                 sighand = rcu_dereference(tsk->sighand);
1189                 if (unlikely(sighand == NULL))
1190                         break;
1191
1192                 spin_lock_irqsave(&sighand->siglock, *flags);
1193                 if (likely(sighand == tsk->sighand))
1194                         break;
1195                 spin_unlock_irqrestore(&sighand->siglock, *flags);
1196         }
1197         rcu_read_unlock();
1198
1199         return sighand;
1200 }
1201
1202 /*
1203  * send signal info to all the members of a group
1204  */
1205 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1206 {
1207         int ret;
1208
1209         rcu_read_lock();
1210         ret = check_kill_permission(sig, info, p);
1211         rcu_read_unlock();
1212
1213         if (!ret && sig)
1214                 ret = do_send_sig_info(sig, info, p, true);
1215
1216         return ret;
1217 }
1218
1219 /*
1220  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1221  * control characters do (^C, ^Z etc)
1222  * - the caller must hold at least a readlock on tasklist_lock
1223  */
1224 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1225 {
1226         struct task_struct *p = NULL;
1227         int retval, success;
1228
1229         success = 0;
1230         retval = -ESRCH;
1231         do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1232                 int err = group_send_sig_info(sig, info, p);
1233                 success |= !err;
1234                 retval = err;
1235         } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1236         return success ? 0 : retval;
1237 }
1238
1239 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1240 {
1241         int error = -ESRCH;
1242         struct task_struct *p;
1243
1244         rcu_read_lock();
1245 retry:
1246         p = pid_task(pid, PIDTYPE_PID);
1247         if (p) {
1248                 error = group_send_sig_info(sig, info, p);
1249                 if (unlikely(error == -ESRCH))
1250                         /*
1251                          * The task was unhashed in between, try again.
1252                          * If it is dead, pid_task() will return NULL,
1253                          * if we race with de_thread() it will find the
1254                          * new leader.
1255                          */
1256                         goto retry;
1257         }
1258         rcu_read_unlock();
1259
1260         return error;
1261 }
1262
1263 int
1264 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1265 {
1266         int error;
1267         rcu_read_lock();
1268         error = kill_pid_info(sig, info, find_vpid(pid));
1269         rcu_read_unlock();
1270         return error;
1271 }
1272
1273 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1274 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1275                       uid_t uid, uid_t euid, u32 secid)
1276 {
1277         int ret = -EINVAL;
1278         struct task_struct *p;
1279         const struct cred *pcred;
1280         unsigned long flags;
1281
1282         if (!valid_signal(sig))
1283                 return ret;
1284
1285         rcu_read_lock();
1286         p = pid_task(pid, PIDTYPE_PID);
1287         if (!p) {
1288                 ret = -ESRCH;
1289                 goto out_unlock;
1290         }
1291         pcred = __task_cred(p);
1292         if (si_fromuser(info) &&
1293             euid != pcred->suid && euid != pcred->uid &&
1294             uid  != pcred->suid && uid  != pcred->uid) {
1295                 ret = -EPERM;
1296                 goto out_unlock;
1297         }
1298         ret = security_task_kill(p, info, sig, secid);
1299         if (ret)
1300                 goto out_unlock;
1301
1302         if (sig) {
1303                 if (lock_task_sighand(p, &flags)) {
1304                         ret = __send_signal(sig, info, p, 1, 0);
1305                         unlock_task_sighand(p, &flags);
1306                 } else
1307                         ret = -ESRCH;
1308         }
1309 out_unlock:
1310         rcu_read_unlock();
1311         return ret;
1312 }
1313 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1314
1315 /*
1316  * kill_something_info() interprets pid in interesting ways just like kill(2).
1317  *
1318  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1319  * is probably wrong.  Should make it like BSD or SYSV.
1320  */
1321
1322 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1323 {
1324         int ret;
1325
1326         if (pid > 0) {
1327                 rcu_read_lock();
1328                 ret = kill_pid_info(sig, info, find_vpid(pid));
1329                 rcu_read_unlock();
1330                 return ret;
1331         }
1332
1333         read_lock(&tasklist_lock);
1334         if (pid != -1) {
1335                 ret = __kill_pgrp_info(sig, info,
1336                                 pid ? find_vpid(-pid) : task_pgrp(current));
1337         } else {
1338                 int retval = 0, count = 0;
1339                 struct task_struct * p;
1340
1341                 for_each_process(p) {
1342                         if (task_pid_vnr(p) > 1 &&
1343                                         !same_thread_group(p, current)) {
1344                                 int err = group_send_sig_info(sig, info, p);
1345                                 ++count;
1346                                 if (err != -EPERM)
1347                                         retval = err;
1348                         }
1349                 }
1350                 ret = count ? retval : -ESRCH;
1351         }
1352         read_unlock(&tasklist_lock);
1353
1354         return ret;
1355 }
1356
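/*
 * Editorial note on the function above: the pid argument follows kill(2)
 * conventions:
 *
 *   pid >  0   signal the single process (thread group) with that pid
 *   pid == 0   signal every process in the caller's process group
 *   pid < -1   signal every process in the process group -pid
 *   pid == -1  signal every process the caller may signal, except the
 *              caller's own thread group and pid 1 (the for_each_process
 *              loop above)
 */
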
1357 /*
1358  * These are for backward compatibility with the rest of the kernel source.
1359  */
1360
1361 int
1362 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1363 {
1364         /*
1365          * Make sure legacy kernel users don't send in bad values
1366          * (normal paths check this in check_kill_permission).
1367          */
1368         if (!valid_signal(sig))
1369                 return -EINVAL;
1370
1371         return do_send_sig_info(sig, info, p, false);
1372 }
1373
1374 #define __si_special(priv) \
1375         ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1376
1377 int
1378 send_sig(int sig, struct task_struct *p, int priv)
1379 {
1380         return send_sig_info(sig, __si_special(priv), p);
1381 }
1382
1383 void
1384 force_sig(int sig, struct task_struct *p)
1385 {
1386         force_sig_info(sig, SEND_SIG_PRIV, p);
1387 }
1388
1389 /*
1390  * When things go south during signal handling, we
1391  * will force a SIGSEGV. And if the signal that caused
1392  * the problem was already a SIGSEGV, we'll want to
1393  * make sure we don't even try to deliver the signal.
1394  */
1395 int
1396 force_sigsegv(int sig, struct task_struct *p)
1397 {
1398         if (sig == SIGSEGV) {
1399                 unsigned long flags;
1400                 spin_lock_irqsave(&p->sighand->siglock, flags);
1401                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1402                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1403         }
1404         force_sig(SIGSEGV, p);
1405         return 0;
1406 }
1407
1408 int kill_pgrp(struct pid *pid, int sig, int priv)
1409 {
1410         int ret;
1411
1412         read_lock(&tasklist_lock);
1413         ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1414         read_unlock(&tasklist_lock);
1415
1416         return ret;
1417 }
1418 EXPORT_SYMBOL(kill_pgrp);
1419
1420 int kill_pid(struct pid *pid, int sig, int priv)
1421 {
1422         return kill_pid_info(sig, __si_special(priv), pid);
1423 }
1424 EXPORT_SYMBOL(kill_pid);
1425
1426 /*
1427  * These functions support sending signals using preallocated sigqueue
1428  * structures.  This is needed "because realtime applications cannot
1429  * afford to lose notifications of asynchronous events, like timer
1430  * expirations or I/O completions".  In the case of POSIX timers
1431  * we allocate the sigqueue structure in timer_create().  If this
1432  * allocation fails we are able to report the failure to the application
1433  * with an EAGAIN error.
1434  */
1435 struct sigqueue *sigqueue_alloc(void)
1436 {
1437         struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1438
1439         if (q)
1440                 q->flags |= SIGQUEUE_PREALLOC;
1441
1442         return q;
1443 }
1444
1445 void sigqueue_free(struct sigqueue *q)
1446 {
1447         unsigned long flags;
1448         spinlock_t *lock = &current->sighand->siglock;
1449
1450         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1451         /*
1452          * We must hold ->siglock while testing q->list
1453          * to serialize with collect_signal() or with
1454          * __exit_signal()->flush_sigqueue().
1455          */
1456         spin_lock_irqsave(lock, flags);
1457         q->flags &= ~SIGQUEUE_PREALLOC;
1458         /*
1459          * If it is queued it will be freed when dequeued,
1460          * like the "regular" sigqueue.
1461          */
1462         if (!list_empty(&q->list))
1463                 q = NULL;
1464         spin_unlock_irqrestore(lock, flags);
1465
1466         if (q)
1467                 __sigqueue_free(q);
1468 }
1469
1470 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1471 {
1472         int sig = q->info.si_signo;
1473         struct sigpending *pending;
1474         unsigned long flags;
1475         int ret;
1476
1477         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1478
1479         ret = -1;
1480         if (!likely(lock_task_sighand(t, &flags)))
1481                 goto ret;
1482
1483         ret = 1; /* the signal is ignored */
1484         if (!prepare_signal(sig, t, 0))
1485                 goto out;
1486
1487         ret = 0;
1488         if (unlikely(!list_empty(&q->list))) {
1489                 /*
1490                  * If an SI_TIMER entry is already queued, just increment
1491                  * the overrun count.
1492                  */
1493                 BUG_ON(q->info.si_code != SI_TIMER);
1494                 q->info.si_overrun++;
1495                 goto out;
1496         }
1497         q->info.si_overrun = 0;
1498
1499         signalfd_notify(t, sig);
1500         pending = group ? &t->signal->shared_pending : &t->pending;
1501         list_add_tail(&q->list, &pending->list);
1502         sigaddset(&pending->signal, sig);
1503         complete_signal(sig, t, group);
1504 out:
1505         unlock_task_sighand(t, &flags);
1506 ret:
1507         return ret;
1508 }
1509
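/*
 * Editorial illustration (not part of the original file): the intended life
 * cycle of a preallocated sigqueue, roughly as the POSIX timer code uses
 * the three interfaces above.  "struct my_timer" and the my_timer_* helpers
 * are hypothetical placeholders.
 */
struct my_timer {
        struct sigqueue         *sigq;
        struct task_struct      *target;        /* task to signal on expiry */
        int                     signo;
};

static int my_timer_create(struct my_timer *tmr)
{
        /* Allocate up front so expiry itself can never fail with -EAGAIN. */
        tmr->sigq = sigqueue_alloc();
        if (!tmr->sigq)
                return -EAGAIN;

        tmr->sigq->info.si_signo = tmr->signo;
        tmr->sigq->info.si_code  = SI_TIMER;
        return 0;
}

static void my_timer_expire(struct my_timer *tmr)
{
        /* Re-sending while the entry is still queued only bumps si_overrun. */
        send_sigqueue(tmr->sigq, tmr->target, 1 /* group-wide */);
}

static void my_timer_delete(struct my_timer *tmr)
{
        sigqueue_free(tmr->sigq);       /* safe even if still queued */
}
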
1510 /*
1511  * Let a parent know about the death of a child.
1512  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1513  *
1514  * Returns -1 if our parent ignored us and so we've switched to
1515  * self-reaping, or else @sig.
1516  */
1517 int do_notify_parent(struct task_struct *tsk, int sig)
1518 {
1519         struct siginfo info;
1520         unsigned long flags;
1521         struct sighand_struct *psig;
1522         int ret = sig;
1523
1524         BUG_ON(sig == -1);
1525
1526         /* do_notify_parent_cldstop should have been called instead.  */
1527         BUG_ON(task_is_stopped_or_traced(tsk));
1528
1529         BUG_ON(!task_ptrace(tsk) &&
1530                (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1531
1532         info.si_signo = sig;
1533         info.si_errno = 0;
1534         /*
1535          * We are under tasklist_lock here so our parent is tied to
1536          * us and cannot exit and release its namespace.
1537          *
1538          * The only thing it can do is switch its nsproxy with sys_unshare(),
1539          * but unsharing pid namespaces is not allowed, so we will always
1540          * see the relevant namespace.
1541          *
1542          * write_lock() currently calls preempt_disable() which is the
1543          * same as rcu_read_lock(), but according to Oleg it is not
1544          * correct to rely on this.
1545          */
1546         rcu_read_lock();
1547         info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1548         info.si_uid = __task_cred(tsk)->uid;
1549         rcu_read_unlock();
1550
1551         info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1552                                 tsk->signal->utime));
1553         info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1554                                 tsk->signal->stime));
1555
1556         info.si_status = tsk->exit_code & 0x7f;
1557         if (tsk->exit_code & 0x80)
1558                 info.si_code = CLD_DUMPED;
1559         else if (tsk->exit_code & 0x7f)
1560                 info.si_code = CLD_KILLED;
1561         else {
1562                 info.si_code = CLD_EXITED;
1563                 info.si_status = tsk->exit_code >> 8;
1564         }
1565
1566         psig = tsk->parent->sighand;
1567         spin_lock_irqsave(&psig->siglock, flags);
1568         if (!task_ptrace(tsk) && sig == SIGCHLD &&
1569             (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1570              (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1571                 /*
1572                  * We are exiting and our parent doesn't care.  POSIX.1
1573                  * defines special semantics for setting SIGCHLD to SIG_IGN
1574                  * or setting the SA_NOCLDWAIT flag: we should be reaped
1575                  * automatically and not left for our parent's wait4 call.
1576                  * Rather than having the parent do it as a magic kind of
1577                  * signal handler, we just set this to tell do_exit that we
1578                  * can be cleaned up without becoming a zombie.  Note that
1579                  * we still call __wake_up_parent in this case, because a
1580                  * blocked sys_wait4 might now return -ECHILD.
1581                  *
1582                  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1583                  * is implementation-defined: we do (if you don't want
1584                  * it, just use SIG_IGN instead).
1585                  */
1586                 ret = tsk->exit_signal = -1;
1587                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1588                         sig = -1;
1589         }
1590         if (valid_signal(sig) && sig > 0)
1591                 __group_send_sig_info(sig, &info, tsk->parent);
1592         __wake_up_parent(tsk, tsk->parent);
1593         spin_unlock_irqrestore(&psig->siglock, flags);
1594
1595         return ret;
1596 }
1597
1598 /**
1599  * do_notify_parent_cldstop - notify parent of stopped/continued state change
1600  * @tsk: task reporting the state change
1601  * @for_ptracer: the notification is for ptracer
1602  * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1603  *
1604  * Notify @tsk's parent that the stopped/continued state has changed.  If
1605  * @for_ptracer is %false, @tsk's group leader notifies its real parent.
1606  * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1607  *
1608  * CONTEXT:
1609  * Must be called with tasklist_lock at least read locked.
1610  */
1611 static void do_notify_parent_cldstop(struct task_struct *tsk,
1612                                      bool for_ptracer, int why)
1613 {
1614         struct siginfo info;
1615         unsigned long flags;
1616         struct task_struct *parent;
1617         struct sighand_struct *sighand;
1618
1619         if (for_ptracer) {
1620                 parent = tsk->parent;
1621         } else {
1622                 tsk = tsk->group_leader;
1623                 parent = tsk->real_parent;
1624         }
1625
1626         info.si_signo = SIGCHLD;
1627         info.si_errno = 0;
1628         /*
1629          * see comment in do_notify_parent() about the following 3 lines
1630          */
1631         rcu_read_lock();
1632         info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1633         info.si_uid = __task_cred(tsk)->uid;
1634         rcu_read_unlock();
1635
1636         info.si_utime = cputime_to_clock_t(tsk->utime);
1637         info.si_stime = cputime_to_clock_t(tsk->stime);
1638
1639         info.si_code = why;
1640         switch (why) {
1641         case CLD_CONTINUED:
1642                 info.si_status = SIGCONT;
1643                 break;
1644         case CLD_STOPPED:
1645                 info.si_status = tsk->signal->group_exit_code & 0x7f;
1646                 break;
1647         case CLD_TRAPPED:
1648                 info.si_status = tsk->exit_code & 0x7f;
1649                 break;
1650         default:
1651                 BUG();
1652         }
1653
1654         sighand = parent->sighand;
1655         spin_lock_irqsave(&sighand->siglock, flags);
1656         if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1657             !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1658                 __group_send_sig_info(SIGCHLD, &info, parent);
1659         /*
1660          * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1661          */
1662         __wake_up_parent(tsk, parent);
1663         spin_unlock_irqrestore(&sighand->siglock, flags);
1664 }
1665
1666 static inline int may_ptrace_stop(void)
1667 {
1668         if (!likely(task_ptrace(current)))
1669                 return 0;
1670         /*
1671          * Are we in the middle of do_coredump?
1672          * If so, and our tracer is also part of the coredump, stopping
1673          * is a deadlock and pointless because our tracer is dead, so
1674          * don't allow us to stop.
1675          * If SIGKILL was already sent before the caller unlocked
1676          * ->siglock we must see ->core_state != NULL. Otherwise it
1677          * is safe to enter schedule().
1678          */
1679         if (unlikely(current->mm->core_state) &&
1680             unlikely(current->mm == current->parent->mm))
1681                 return 0;
1682
1683         return 1;
1684 }
1685
1686 /*
1687  * Return nonzero if there is a SIGKILL that should be waking us up.
1688  * Called with the siglock held.
1689  */
1690 static int sigkill_pending(struct task_struct *tsk)
1691 {
1692         return  sigismember(&tsk->pending.signal, SIGKILL) ||
1693                 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1694 }
1695
1696 /*
1697  * This must be called with current->sighand->siglock held.
1698  *
1699  * This should be the path for all ptrace stops.
1700  * We always set current->last_siginfo while stopped here.
1701  * That makes it a way to test a stopped process for
1702  * being ptrace-stopped vs being job-control-stopped.
1703  *
1704  * If we actually decide not to stop at all because the tracer
1705  * is gone, we keep current->exit_code unless clear_code.
1706  */
1707 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1708         __releases(&current->sighand->siglock)
1709         __acquires(&current->sighand->siglock)
1710 {
1711         if (arch_ptrace_stop_needed(exit_code, info)) {
1712                 /*
1713                  * The arch code has something special to do before a
1714                  * ptrace stop.  This is allowed to block, e.g. for faults
1715                  * on user stack pages.  We can't keep the siglock while
1716                  * calling arch_ptrace_stop, so we must release it now.
1717                  * To preserve proper semantics, we must do this before
1718                  * any signal bookkeeping like checking group_stop_count.
1719                  * Meanwhile, a SIGKILL could come in before we retake the
1720                  * siglock.  That must prevent us from sleeping in TASK_TRACED.
1721                  * So after regaining the lock, we must check for SIGKILL.
1722                  */
1723                 spin_unlock_irq(&current->sighand->siglock);
1724                 arch_ptrace_stop(exit_code, info);
1725                 spin_lock_irq(&current->sighand->siglock);
1726                 if (sigkill_pending(current))
1727                         return;
1728         }
1729
1730         /*
1731          * If @why is CLD_STOPPED, we're trapping to participate in a group
1732          * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
1733          * while siglock was released for the arch hook, PENDING could be
1734          * clear now.  We act as if SIGCONT is received after TASK_TRACED
1735          * is entered - ignore it.
1736          */
1737         if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
1738                 task_participate_group_stop(current);
1739
1740         current->last_siginfo = info;
1741         current->exit_code = exit_code;
1742
1743         /*
1744          * TRACED should be visible before TRAPPING is cleared; otherwise,
1745          * the tracer might fail do_wait().
1746          */
1747         set_current_state(TASK_TRACED);
1748
1749         /*
1750          * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
1751          * transition to TASK_TRACED should be atomic with respect to
1752          * siglock.  This should be done after the arch hook as siglock is
1753          * released and regrabbed across it.
1754          */
1755         task_clear_group_stop_trapping(current);
1756
1757         spin_unlock_irq(&current->sighand->siglock);
1758         read_lock(&tasklist_lock);
1759         if (may_ptrace_stop()) {
1760                 do_notify_parent_cldstop(current, task_ptrace(current), why);
1761                 /*
1762                  * Don't want to allow preemption here, because
1763                  * sys_ptrace() needs this task to be inactive.
1764                  *
1765                  * XXX: implement read_unlock_no_resched().
1766                  */
1767                 preempt_disable();
1768                 read_unlock(&tasklist_lock);
1769                 preempt_enable_no_resched();
1770                 schedule();
1771         } else {
1772                 /*
1773                  * By the time we got the lock, our tracer went away.
1774                  * Don't drop the lock yet, another tracer may come.
1775                  */
1776                 __set_current_state(TASK_RUNNING);
1777                 if (clear_code)
1778                         current->exit_code = 0;
1779                 read_unlock(&tasklist_lock);
1780         }
1781
1782         /*
1783          * While in TASK_TRACED, we were considered "frozen enough".
1784          * Now that we woke up, it's crucial that we freeze now, if we're
1785          * supposed to be frozen, before running anything substantial.
1786          */
1787         try_to_freeze();
1788
1789         /*
1790          * We are back.  Now reacquire the siglock before touching
1791          * last_siginfo, so that we are sure to have synchronized with
1792          * any signal-sending on another CPU that wants to examine it.
1793          */
1794         spin_lock_irq(&current->sighand->siglock);
1795         current->last_siginfo = NULL;
1796
1797         /*
1798          * Queued signals ignored us while we were stopped for tracing.
1799          * So check for any that we should take before resuming user mode.
1800          * This sets TIF_SIGPENDING, but never clears it.
1801          */
1802         recalc_sigpending_tsk(current);
1803 }
1804
1805 void ptrace_notify(int exit_code)
1806 {
1807         siginfo_t info;
1808
1809         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1810
1811         memset(&info, 0, sizeof info);
1812         info.si_signo = SIGTRAP;
1813         info.si_code = exit_code;
1814         info.si_pid = task_pid_vnr(current);
1815         info.si_uid = current_uid();
1816
1817         /* Let the debugger run.  */
1818         spin_lock_irq(&current->sighand->siglock);
1819         ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
1820         spin_unlock_irq(&current->sighand->siglock);
1821 }
1822
1823 /*
1824  * This performs the stopping for SIGSTOP and other stop signals.
1825  * We have to stop all threads in the thread group.
1826  * Returns nonzero if we've actually stopped and released the siglock.
1827  * Returns zero if we didn't stop and still hold the siglock.
1828  */
1829 static int do_signal_stop(int signr)
1830 {
1831         struct signal_struct *sig = current->signal;
1832
1833         if (!(current->group_stop & GROUP_STOP_PENDING)) {
1834                 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
1835                 struct task_struct *t;
1836
1837                 /* signr will be recorded in task->group_stop for retries */
1838                 WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
1839
1840                 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1841                     unlikely(signal_group_exit(sig)))
1842                         return 0;
1843                 /*
1844                  * There is no group stop already in progress.  We must
1845                  * initiate one now.
1846                  *
1847                  * While ptraced, a task may be resumed while group stop is
1848                  * still in effect and then receive a stop signal and
1849                  * initiate another group stop.  This deviates from the
1850                  * usual behavior as two consecutive stop signals can't
1851                  * cause two group stops when !ptraced.
1852                  *
1853                  * The condition can be distinguished by testing whether
1854                  * SIGNAL_STOP_STOPPED is already set.  Don't generate
1855                  * group_exit_code in such case.
1856                  *
1857                  * This is not necessary for SIGNAL_STOP_CONTINUED because
1858                  * an intervening stop signal is required to cause two
1859                  * continued events regardless of ptrace.
1860                  */
1861                 if (!(sig->flags & SIGNAL_STOP_STOPPED))
1862                         sig->group_exit_code = signr;
1863                 else
1864                         WARN_ON_ONCE(!task_ptrace(current));
1865
1866                 current->group_stop &= ~GROUP_STOP_SIGMASK;
1867                 current->group_stop |= signr | gstop;
1868                 sig->group_stop_count = 1;
1869                 for (t = next_thread(current); t != current;
1870                      t = next_thread(t)) {
1871                         t->group_stop &= ~GROUP_STOP_SIGMASK;
1872                         /*
1873                          * Setting state to TASK_STOPPED for a group
1874                          * stop is always done with the siglock held,
1875                          * so this check has no races.
1876                          */
1877                         if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
1878                                 t->group_stop |= signr | gstop;
1879                                 sig->group_stop_count++;
1880                                 signal_wake_up(t, 0);
1881                         } else {
1882                                 task_clear_group_stop_pending(t);
1883                         }
1884                 }
1885         }
1886 retry:
1887         if (likely(!task_ptrace(current))) {
1888                 int notify = 0;
1889
1890                 /*
1891                  * If there are no other threads in the group, or if there
1892                  * is a group stop in progress and we are the last to stop,
1893                  * report to the parent.
1894                  */
1895                 if (task_participate_group_stop(current))
1896                         notify = CLD_STOPPED;
1897
1898                 __set_current_state(TASK_STOPPED);
1899                 spin_unlock_irq(&current->sighand->siglock);
1900
1901                 if (notify) {
1902                         read_lock(&tasklist_lock);
1903                         do_notify_parent_cldstop(current, task_ptrace(current),
1904                                                  notify);
1905                         read_unlock(&tasklist_lock);
1906                 }
1907
1908                 /* Now we don't run again until woken by SIGCONT or SIGKILL */
1909                 schedule();
1910
1911                 spin_lock_irq(&current->sighand->siglock);
1912         } else {
1913                 ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
1914                             CLD_STOPPED, 0, NULL);
1915                 current->exit_code = 0;
1916         }
1917
1918         /*
1919          * GROUP_STOP_PENDING could be set if another group stop has
1920          * started since we were woken up, or ptrace wants us to transition
1921          * between TASK_STOPPED and TRACED.  Retry the group stop.
1922          */
1923         if (current->group_stop & GROUP_STOP_PENDING) {
1924                 WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
1925                 goto retry;
1926         }
1927
1928         /* PTRACE_ATTACH might have raced with task killing, clear trapping */
1929         task_clear_group_stop_trapping(current);
1930
1931         spin_unlock_irq(&current->sighand->siglock);
1932
1933         tracehook_finish_jctl();
1934
1935         return 1;
1936 }
1937
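/*
 * ptrace_signal - report a freshly dequeued signal to the tracer
 *
 * No-op (returning @signr unchanged) when the current task is not
 * ptraced.  Otherwise we trap with CLD_TRAPPED and let the debugger
 * examine or replace the signal.  On resume ->exit_code holds the
 * possibly rewritten signal number: 0 means the tracer cancelled it,
 * and a new signal that is blocked gets requeued while 0 is returned
 * so the caller keeps looping.
 */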
1938 static int ptrace_signal(int signr, siginfo_t *info,
1939                          struct pt_regs *regs, void *cookie)
1940 {
1941         if (!task_ptrace(current))
1942                 return signr;
1943
1944         ptrace_signal_deliver(regs, cookie);
1945
1946         /* Let the debugger run.  */
1947         ptrace_stop(signr, CLD_TRAPPED, 0, info);
1948
1949         /* We're back.  Did the debugger cancel the sig?  */
1950         signr = current->exit_code;
1951         if (signr == 0)
1952                 return signr;
1953
1954         current->exit_code = 0;
1955
1956         /* Update the siginfo structure if the signal has
1957            changed.  If the debugger wanted something
1958            specific in the siginfo structure then it should
1959            have updated *info via PTRACE_SETSIGINFO.  */
1960         if (signr != info->si_signo) {
1961                 info->si_signo = signr;
1962                 info->si_errno = 0;
1963                 info->si_code = SI_USER;
1964                 info->si_pid = task_pid_vnr(current->parent);
1965                 info->si_uid = task_uid(current->parent);
1966         }
1967
1968         /* If the (new) signal is now blocked, requeue it.  */
1969         if (sigismember(&current->blocked, signr)) {
1970                 specific_send_sig_info(signr, info, current);
1971                 signr = 0;
1972         }
1973
1974         return signr;
1975 }
1976
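/*
 * get_signal_to_deliver - main signal dequeue loop
 *
 * Called from the architecture signal code on the way back to user
 * mode (typically from its do_signal() path).  Handles deferred
 * CLD_STOPPED/CLD_CONTINUED notifications, group stop participation,
 * tracer interception and the default actions (ignore, stop, core
 * dump, exit).  Returns a non-zero signal number with @info and
 * @return_ka filled in when a user handler must run, or 0 when there
 * is nothing left to deliver.
 */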
1977 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1978                           struct pt_regs *regs, void *cookie)
1979 {
1980         struct sighand_struct *sighand = current->sighand;
1981         struct signal_struct *signal = current->signal;
1982         int signr;
1983
1984 relock:
1985         /*
1986          * We'll jump back here after any time we were stopped in TASK_STOPPED.
1987          * While in TASK_STOPPED, we were considered "frozen enough".
1988          * Now that we woke up, it's crucial that we freeze now, if we're
1989          * supposed to be frozen, before running anything substantial.
1990          */
1991         try_to_freeze();
1992
1993         spin_lock_irq(&sighand->siglock);
1994         /*
1995          * Every stopped thread goes here after wakeup. Check to see if
1996          * we should notify the parent, prepare_signal(SIGCONT) encodes
1997          * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1998          */
1999         if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2000                 struct task_struct *leader;
2001                 int why;
2002
2003                 if (signal->flags & SIGNAL_CLD_CONTINUED)
2004                         why = CLD_CONTINUED;
2005                 else
2006                         why = CLD_STOPPED;
2007
2008                 signal->flags &= ~SIGNAL_CLD_MASK;
2009
2010                 spin_unlock_irq(&sighand->siglock);
2011
2012                 read_lock(&tasklist_lock);
2013                 leader = current->group_leader;
2014                 do_notify_parent_cldstop(leader, task_ptrace(leader), why);
2015                 read_unlock(&tasklist_lock);
2016                 goto relock;
2017         }
2018
2019         for (;;) {
2020                 struct k_sigaction *ka;
2021                 /*
2022                  * Tracing can induce an artificial signal and choose sigaction.
2023                  * The return value in @signr determines the default action,
2024                  * but @info->si_signo is the signal number we will report.
2025                  */
2026                 signr = tracehook_get_signal(current, regs, info, return_ka);
2027                 if (unlikely(signr < 0))
2028                         goto relock;
2029                 if (unlikely(signr != 0))
2030                         ka = return_ka;
2031                 else {
2032                         if (unlikely(current->group_stop &
2033                                      GROUP_STOP_PENDING) && do_signal_stop(0))
2034                                 goto relock;
2035
2036                         signr = dequeue_signal(current, &current->blocked,
2037                                                info);
2038
2039                         if (!signr)
2040                                 break; /* will return 0 */
2041
2042                         if (signr != SIGKILL) {
2043                                 signr = ptrace_signal(signr, info,
2044                                                       regs, cookie);
2045                                 if (!signr)
2046                                         continue;
2047                         }
2048
2049                         ka = &sighand->action[signr-1];
2050                 }
2051
2052                 /* Trace actually delivered signals. */
2053                 trace_signal_deliver(signr, info, ka);
2054
2055                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
2056                         continue;
2057                 if (ka->sa.sa_handler != SIG_DFL) {
2058                         /* Run the handler.  */
2059                         *return_ka = *ka;
2060
2061                         if (ka->sa.sa_flags & SA_ONESHOT)
2062                                 ka->sa.sa_handler = SIG_DFL;
2063
2064                         break; /* will return non-zero "signr" value */
2065                 }
2066
2067                 /*
2068                  * Now we are doing the default action for this signal.
2069                  */
2070                 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2071                         continue;
2072
2073                 /*
2074                  * Global init gets no signals it doesn't want.
2075                  * Container-init gets no signals it doesn't want from same
2076                  * container.
2077                  *
2078                  * Note that if global/container-init sees a sig_kernel_only()
2079                  * signal here, the signal must have been generated internally
2080                  * or must have come from an ancestor namespace. In either
2081                  * case, the signal cannot be dropped.
2082                  */
2083                 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2084                                 !sig_kernel_only(signr))
2085                         continue;
2086
2087                 if (sig_kernel_stop(signr)) {
2088                         /*
2089                          * The default action is to stop all threads in
2090                          * the thread group.  The job control signals
2091                          * do nothing in an orphaned pgrp, but SIGSTOP
2092                          * always works.  Note that siglock needs to be
2093                          * dropped during the call to is_orphaned_pgrp()
2094                          * because of lock ordering with tasklist_lock.
2095                          * This allows an intervening SIGCONT to be posted.
2096                          * We need to check for that and bail out if necessary.
2097                          */
2098                         if (signr != SIGSTOP) {
2099                                 spin_unlock_irq(&sighand->siglock);
2100
2101                                 /* signals can be posted during this window */
2102
2103                                 if (is_current_pgrp_orphaned())
2104                                         goto relock;
2105
2106                                 spin_lock_irq(&sighand->siglock);
2107                         }
2108
2109                         if (likely(do_signal_stop(info->si_signo))) {
2110                                 /* It released the siglock.  */
2111                                 goto relock;
2112                         }
2113
2114                         /*
2115                          * We didn't actually stop, due to a race
2116                          * with SIGCONT or something like that.
2117                          */
2118                         continue;
2119                 }
2120
2121                 spin_unlock_irq(&sighand->siglock);
2122
2123                 /*
2124                  * Anything else is fatal, maybe with a core dump.
2125                  */
2126                 current->flags |= PF_SIGNALED;
2127
2128                 if (sig_kernel_coredump(signr)) {
2129                         if (print_fatal_signals)
2130                                 print_fatal_signal(regs, info->si_signo);
2131                         /*
2132                          * If it was able to dump core, this kills all
2133                          * other threads in the group and synchronizes with
2134                          * their demise.  If we lost the race with another
2135                          * thread getting here, it set group_exit_code
2136                          * first and our do_group_exit call below will use
2137                          * that value and ignore the one we pass it.
2138                          */
2139                         do_coredump(info->si_signo, info->si_signo, regs);
2140                 }
2141
2142                 /*
2143                  * Death signals, no core dump.
2144                  */
2145                 do_group_exit(info->si_signo);
2146                 /* NOTREACHED */
2147         }
2148         spin_unlock_irq(&sighand->siglock);
2149         return signr;
2150 }
2151
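/*
 * exit_signals - signal-side bookkeeping for an exiting task
 *
 * Marks @tsk PF_EXITING under siglock so that group-wide signals stop
 * targeting it, wakes another thread to take any already-pending group
 * signal, and, if @tsk was the last thread needed to complete a group
 * stop, notifies the parent with CLD_STOPPED.
 */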
2152 void exit_signals(struct task_struct *tsk)
2153 {
2154         int group_stop = 0;
2155         struct task_struct *t;
2156
2157         if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2158                 tsk->flags |= PF_EXITING;
2159                 return;
2160         }
2161
2162         spin_lock_irq(&tsk->sighand->siglock);
2163         /*
2164          * From now this task is not visible for group-wide signals,
2165          * see wants_signal(), do_signal_stop().
2166          */
2167         tsk->flags |= PF_EXITING;
2168         if (!signal_pending(tsk))
2169                 goto out;
2170
2171         /* It could be that __group_complete_signal() chose us to
2172          * notify about group-wide signal. Another thread should be
2173          * woken now to take the signal since we will not.
2174          */
2175         for (t = tsk; (t = next_thread(t)) != tsk; )
2176                 if (!signal_pending(t) && !(t->flags & PF_EXITING))
2177                         recalc_sigpending_and_wake(t);
2178
2179         if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
2180             task_participate_group_stop(tsk))
2181                 group_stop = CLD_STOPPED;
2182 out:
2183         spin_unlock_irq(&tsk->sighand->siglock);
2184
2185         if (unlikely(group_stop)) {
2186                 read_lock(&tasklist_lock);
2187                 do_notify_parent_cldstop(tsk, task_ptrace(tsk), group_stop);
2188                 read_unlock(&tasklist_lock);
2189         }
2190 }
2191
2192 EXPORT_SYMBOL(recalc_sigpending);
2193 EXPORT_SYMBOL_GPL(dequeue_signal);
2194 EXPORT_SYMBOL(flush_signals);
2195 EXPORT_SYMBOL(force_sig);
2196 EXPORT_SYMBOL(send_sig);
2197 EXPORT_SYMBOL(send_sig_info);
2198 EXPORT_SYMBOL(sigprocmask);
2199 EXPORT_SYMBOL(block_all_signals);
2200 EXPORT_SYMBOL(unblock_all_signals);
2201
2202
2203 /*
2204  * System call entry points.
2205  */
2206
2207 SYSCALL_DEFINE0(restart_syscall)
2208 {
2209         struct restart_block *restart = &current_thread_info()->restart_block;
2210         return restart->fn(restart);
2211 }
2212
2213 long do_no_restart_syscall(struct restart_block *param)
2214 {
2215         return -EINTR;
2216 }
2217
2218 /*
2219  * We don't need to get the kernel lock - this is all local to this
2220  * particular thread (and that's good, because this is _heavily_
2221  * used by various programs)
2222  */
2223
2224 /*
2225  * This is also useful for kernel threads that want to temporarily
2226  * (or permanently) block certain signals.
2227  *
2228  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2229  * interface happily blocks "unblockable" signals like SIGKILL
2230  * and friends.
2231  */
2232 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2233 {
2234         int error;
2235
2236         spin_lock_irq(&current->sighand->siglock);
2237         if (oldset)
2238                 *oldset = current->blocked;
2239
2240         error = 0;
2241         switch (how) {
2242         case SIG_BLOCK:
2243                 sigorsets(&current->blocked, &current->blocked, set);
2244                 break;
2245         case SIG_UNBLOCK:
2246                 signandsets(&current->blocked, &current->blocked, set);
2247                 break;
2248         case SIG_SETMASK:
2249                 current->blocked = *set;
2250                 break;
2251         default:
2252                 error = -EINVAL;
2253         }
2254         recalc_sigpending();
2255         spin_unlock_irq(&current->sighand->siglock);
2256
2257         return error;
2258 }
2259
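/*
 * sys_rt_sigprocmask - change and/or fetch the blocked signal mask
 *
 * Copy-in/copy-out wrapper around sigprocmask().  SIGKILL and SIGSTOP
 * are silently removed from the new mask, and the previous mask is
 * returned through @oset when requested.
 */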
2260 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2261                 sigset_t __user *, oset, size_t, sigsetsize)
2262 {
2263         int error = -EINVAL;
2264         sigset_t old_set, new_set;
2265
2266         /* XXX: Don't preclude handling different sized sigset_t's.  */
2267         if (sigsetsize != sizeof(sigset_t))
2268                 goto out;
2269
2270         if (set) {
2271                 error = -EFAULT;
2272                 if (copy_from_user(&new_set, set, sizeof(*set)))
2273                         goto out;
2274                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2275
2276                 error = sigprocmask(how, &new_set, &old_set);
2277                 if (error)
2278                         goto out;
2279                 if (oset)
2280                         goto set_old;
2281         } else if (oset) {
2282                 spin_lock_irq(&current->sighand->siglock);
2283                 old_set = current->blocked;
2284                 spin_unlock_irq(&current->sighand->siglock);
2285
2286         set_old:
2287                 error = -EFAULT;
2288                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2289                         goto out;
2290         }
2291         error = 0;
2292 out:
2293         return error;
2294 }
2295
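/*
 * do_sigpending - report the caller's pending signal set
 *
 * Computes the union of the thread-private and shared pending sets,
 * restricted to signals that are currently blocked, and copies it to
 * the user buffer @set of @sigsetsize bytes.
 */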
2296 long do_sigpending(void __user *set, unsigned long sigsetsize)
2297 {
2298         long error = -EINVAL;
2299         sigset_t pending;
2300
2301         if (sigsetsize > sizeof(sigset_t))
2302                 goto out;
2303
2304         spin_lock_irq(&current->sighand->siglock);
2305         sigorsets(&pending, &current->pending.signal,
2306                   &current->signal->shared_pending.signal);
2307         spin_unlock_irq(&current->sighand->siglock);
2308
2309         /* Outside the lock because only this thread touches it.  */
2310         sigandsets(&pending, &current->blocked, &pending);
2311
2312         error = -EFAULT;
2313         if (!copy_to_user(set, &pending, sigsetsize))
2314                 error = 0;
2315
2316 out:
2317         return error;
2318 }
2319
2320 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2321 {
2322         return do_sigpending(set, sigsetsize);
2323 }
2324
2325 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2326
2327 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2328 {
2329         int err;
2330
2331         if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2332                 return -EFAULT;
2333         if (from->si_code < 0)
2334                 return __copy_to_user(to, from, sizeof(siginfo_t))
2335                         ? -EFAULT : 0;
2336         /*
2337          * If you change siginfo_t structure, please be sure
2338          * this code is fixed accordingly.
2339          * Please remember to update the signalfd_copyinfo() function
2340          * inside fs/signalfd.c too, in case siginfo_t changes.
2341          * It should never copy any pad contained in the structure
2342          * to avoid security leaks, but must copy the generic
2343          * 3 ints plus the relevant union member.
2344          */
2345         err = __put_user(from->si_signo, &to->si_signo);
2346         err |= __put_user(from->si_errno, &to->si_errno);
2347         err |= __put_user((short)from->si_code, &to->si_code);
2348         switch (from->si_code & __SI_MASK) {
2349         case __SI_KILL:
2350                 err |= __put_user(from->si_pid, &to->si_pid);
2351                 err |= __put_user(from->si_uid, &to->si_uid);
2352                 break;
2353         case __SI_TIMER:
2354                  err |= __put_user(from->si_tid, &to->si_tid);
2355                  err |= __put_user(from->si_overrun, &to->si_overrun);
2356                  err |= __put_user(from->si_ptr, &to->si_ptr);
2357                 break;
2358         case __SI_POLL:
2359                 err |= __put_user(from->si_band, &to->si_band);
2360                 err |= __put_user(from->si_fd, &to->si_fd);
2361                 break;
2362         case __SI_FAULT:
2363                 err |= __put_user(from->si_addr, &to->si_addr);
2364 #ifdef __ARCH_SI_TRAPNO
2365                 err |= __put_user(from->si_trapno, &to->si_trapno);
2366 #endif
2367 #ifdef BUS_MCEERR_AO
2368                 /* 
2369                  * Other callers might not initialize the si_lsb field,
2370                  * so check explicitly for the right codes here.
2371                  */
2372                 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2373                         err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2374 #endif
2375                 break;
2376         case __SI_CHLD:
2377                 err |= __put_user(from->si_pid, &to->si_pid);
2378                 err |= __put_user(from->si_uid, &to->si_uid);
2379                 err |= __put_user(from->si_status, &to->si_status);
2380                 err |= __put_user(from->si_utime, &to->si_utime);
2381                 err |= __put_user(from->si_stime, &to->si_stime);
2382                 break;
2383         case __SI_RT: /* This is not generated by the kernel as of now. */
2384         case __SI_MESGQ: /* But this is */
2385                 err |= __put_user(from->si_pid, &to->si_pid);
2386                 err |= __put_user(from->si_uid, &to->si_uid);
2387                 err |= __put_user(from->si_ptr, &to->si_ptr);
2388                 break;
2389         default: /* this is just in case for now ... */
2390                 err |= __put_user(from->si_pid, &to->si_pid);
2391                 err |= __put_user(from->si_uid, &to->si_uid);
2392                 break;
2393         }
2394         return err;
2395 }
2396
2397 #endif
2398
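/*
 * sys_rt_sigtimedwait - synchronously wait for queued signals
 *
 * Waits for one of the signals in @uthese, optionally bounded by @uts.
 * A signal that is already pending is dequeued immediately; otherwise
 * the requested signals are temporarily unblocked (saved in
 * ->real_blocked) while we sleep.  Returns the signal number with
 * @uinfo filled in, -EAGAIN if the timeout expired, or -EINTR if we
 * were interrupted by some other signal.
 */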
2399 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2400                 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2401                 size_t, sigsetsize)
2402 {
2403         int ret, sig;
2404         sigset_t these;
2405         struct timespec ts;
2406         siginfo_t info;
2407         long timeout = 0;
2408
2409         /* XXX: Don't preclude handling different sized sigset_t's.  */
2410         if (sigsetsize != sizeof(sigset_t))
2411                 return -EINVAL;
2412
2413         if (copy_from_user(&these, uthese, sizeof(these)))
2414                 return -EFAULT;
2415                 
2416         /*
2417          * Invert the set of allowed signals to get those we
2418          * want to block.
2419          */
2420         sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2421         signotset(&these);
2422
2423         if (uts) {
2424                 if (copy_from_user(&ts, uts, sizeof(ts)))
2425                         return -EFAULT;
2426                 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2427                     || ts.tv_sec < 0)
2428                         return -EINVAL;
2429         }
2430
2431         spin_lock_irq(&current->sighand->siglock);
2432         sig = dequeue_signal(current, &these, &info);
2433         if (!sig) {
2434                 timeout = MAX_SCHEDULE_TIMEOUT;
2435                 if (uts)
2436                         timeout = (timespec_to_jiffies(&ts)
2437                                    + (ts.tv_sec || ts.tv_nsec));
2438
2439                 if (timeout) {
2440                         /* None ready -- temporarily unblock those we're
2441                          * interested in while we are sleeping so that we'll
2442                          * be awakened when they arrive.  */
2443                         current->real_blocked = current->blocked;
2444                         sigandsets(&current->blocked, &current->blocked, &these);
2445                         recalc_sigpending();
2446                         spin_unlock_irq(&current->sighand->siglock);
2447
2448                         timeout = schedule_timeout_interruptible(timeout);
2449
2450                         spin_lock_irq(&current->sighand->siglock);
2451                         sig = dequeue_signal(current, &these, &info);
2452                         current->blocked = current->real_blocked;
2453                         siginitset(&current->real_blocked, 0);
2454                         recalc_sigpending();
2455                 }
2456         }
2457         spin_unlock_irq(&current->sighand->siglock);
2458
2459         if (sig) {
2460                 ret = sig;
2461                 if (uinfo) {
2462                         if (copy_siginfo_to_user(uinfo, &info))
2463                                 ret = -EFAULT;
2464                 }
2465         } else {
2466                 ret = -EAGAIN;
2467                 if (timeout)
2468                         ret = -EINTR;
2469         }
2470
2471         return ret;
2472 }
2473
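/*
 * sys_kill - send a signal to a process or process group
 *
 * @pid selects the target in the usual POSIX way: a single process for
 * pid > 0, the caller's process group for 0, the group -pid for
 * pid < -1, and every process we may signal for -1.  The actual fanout
 * is done by kill_something_info().
 */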
2474 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2475 {
2476         struct siginfo info;
2477
2478         info.si_signo = sig;
2479         info.si_errno = 0;
2480         info.si_code = SI_USER;
2481         info.si_pid = task_tgid_vnr(current);
2482         info.si_uid = current_uid();
2483
2484         return kill_something_info(sig, &info, pid);
2485 }
2486
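/*
 * do_send_specific - deliver a thread-directed signal
 *
 * Looks the target thread up by its virtual PID, optionally checks
 * that it belongs to thread group @tgid, verifies permissions and
 * sends @sig as a thread-private signal.  A zero @sig only probes for
 * existence and permission.
 */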
2487 static int
2488 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2489 {
2490         struct task_struct *p;
2491         int error = -ESRCH;
2492
2493         rcu_read_lock();
2494         p = find_task_by_vpid(pid);
2495         if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2496                 error = check_kill_permission(sig, info, p);
2497                 /*
2498                  * The null signal is a permissions and process existence
2499                  * probe.  No signal is actually delivered.
2500                  */
2501                 if (!error && sig) {
2502                         error = do_send_sig_info(sig, info, p, false);
2503                         /*
2504                          * If lock_task_sighand() failed we pretend the task
2505                          * dies after receiving the signal. The window is tiny,
2506                          * and the signal is private anyway.
2507                          */
2508                         if (unlikely(error == -ESRCH))
2509                                 error = 0;
2510                 }
2511         }
2512         rcu_read_unlock();
2513
2514         return error;
2515 }
2516
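/*
 * do_tkill - common helper for sys_tkill() and sys_tgkill()
 *
 * Builds an SI_TKILL siginfo describing the sender and hands it to
 * do_send_specific() for thread-directed delivery.
 */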
2517 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2518 {
2519         struct siginfo info;
2520
2521         info.si_signo = sig;
2522         info.si_errno = 0;
2523         info.si_code = SI_TKILL;
2524         info.si_pid = task_tgid_vnr(current);
2525         info.si_uid = current_uid();
2526
2527         return do_send_specific(tgid, pid, sig, &info);
2528 }
2529
2530 /**
2531  *  sys_tgkill - send signal to one specific thread
2532  *  @tgid: the thread group ID of the thread
2533  *  @pid: the PID of the thread
2534  *  @sig: signal to be sent
2535  *
2536  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2537  *  exists but no longer belongs to the target process. This
2538  *  method solves the problem of threads exiting and PIDs getting reused.
2539  */
2540 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2541 {
2542         /* This is only valid for single tasks */
2543         if (pid <= 0 || tgid <= 0)
2544                 return -EINVAL;
2545
2546         return do_tkill(tgid, pid, sig);
2547 }
2548
2549 /*
2550  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2551  */
2552 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2553 {
2554         /* This is only valid for single tasks */
2555         if (pid <= 0)
2556                 return -EINVAL;
2557
2558         return do_tkill(0, pid, sig);
2559 }
2560
2561 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2562                 siginfo_t __user *, uinfo)
2563 {
2564         siginfo_t info;
2565
2566         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2567                 return -EFAULT;
2568
2569         /* Not even root can pretend to send signals from the kernel.
2570          * Nor can they impersonate a kill()/tgkill(), which adds source info.
2571          */
2572         if (info.si_code != SI_QUEUE) {
2573                 /* We used to allow any < 0 si_code */
2574                 WARN_ON_ONCE(info.si_code < 0);
2575                 return -EPERM;
2576         }
2577         info.si_signo = sig;
2578
2579         /* POSIX.1b doesn't mention process groups.  */
2580         return kill_proc_info(sig, &info, pid);
2581 }
2582
2583 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2584 {
2585         /* This is only valid for single tasks */
2586         if (pid <= 0 || tgid <= 0)
2587                 return -EINVAL;
2588
2589         /* Not even root can pretend to send signals from the kernel.
2590          * Nor can they impersonate a kill()/tgkill(), which adds source info.
2591          */
2592         if (info->si_code != SI_QUEUE) {
2593                 /* We used to allow any < 0 si_code */
2594                 WARN_ON_ONCE(info->si_code < 0);
2595                 return -EPERM;
2596         }
2597         info->si_signo = sig;
2598
2599         return do_send_specific(tgid, pid, sig, info);
2600 }
2601
2602 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2603                 siginfo_t __user *, uinfo)
2604 {
2605         siginfo_t info;
2606
2607         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2608                 return -EFAULT;
2609
2610         return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2611 }
2612
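/*
 * do_sigaction - install and/or query a signal disposition
 *
 * Returns the old k_sigaction through @oact and installs @act under
 * siglock.  SIGKILL and SIGSTOP can neither be caught nor masked.  Per
 * POSIX, changing a disposition so that the signal becomes ignored
 * flushes matching signals that are already pending anywhere in the
 * thread group.
 */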
2613 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2614 {
2615         struct task_struct *t = current;
2616         struct k_sigaction *k;
2617         sigset_t mask;
2618
2619         if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2620                 return -EINVAL;
2621
2622         k = &t->sighand->action[sig-1];
2623
2624         spin_lock_irq(&current->sighand->siglock);
2625         if (oact)
2626                 *oact = *k;
2627
2628         if (act) {
2629                 sigdelsetmask(&act->sa.sa_mask,
2630                               sigmask(SIGKILL) | sigmask(SIGSTOP));
2631                 *k = *act;
2632                 /*
2633                  * POSIX 3.3.1.3:
2634                  *  "Setting a signal action to SIG_IGN for a signal that is
2635                  *   pending shall cause the pending signal to be discarded,
2636                  *   whether or not it is blocked."
2637                  *
2638                  *  "Setting a signal action to SIG_DFL for a signal that is
2639                  *   pending and whose default action is to ignore the signal
2640                  *   (for example, SIGCHLD), shall cause the pending signal to
2641                  *   be discarded, whether or not it is blocked"
2642                  */
2643                 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2644                         sigemptyset(&mask);
2645                         sigaddset(&mask, sig);
2646                         rm_from_queue_full(&mask, &t->signal->shared_pending);
2647                         do {
2648                                 rm_from_queue_full(&mask, &t->pending);
2649                                 t = next_thread(t);
2650                         } while (t != current);
2651                 }
2652         }
2653
2654         spin_unlock_irq(&current->sighand->siglock);
2655         return 0;
2656 }
2657
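/*
 * do_sigaltstack - set and/or get the alternate signal stack
 *
 * @uss, when non-NULL, installs a new alternate stack; this is refused
 * with -EPERM while we are running on the current one and with -ENOMEM
 * when the new stack is smaller than MINSIGSTKSZ.  @uoss, when
 * non-NULL, receives the previous settings.  @sp is the caller's user
 * stack pointer, used to decide whether we are on the alternate stack.
 */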
2658 int 
2659 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2660 {
2661         stack_t oss;
2662         int error;
2663
2664         oss.ss_sp = (void __user *) current->sas_ss_sp;
2665         oss.ss_size = current->sas_ss_size;
2666         oss.ss_flags = sas_ss_flags(sp);
2667
2668         if (uss) {
2669                 void __user *ss_sp;
2670                 size_t ss_size;
2671                 int ss_flags;
2672
2673                 error = -EFAULT;
2674                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2675                         goto out;
2676                 error = __get_user(ss_sp, &uss->ss_sp) |
2677                         __get_user(ss_flags, &uss->ss_flags) |
2678                         __get_user(ss_size, &uss->ss_size);
2679                 if (error)
2680                         goto out;
2681
2682                 error = -EPERM;
2683                 if (on_sig_stack(sp))
2684                         goto out;
2685
2686                 error = -EINVAL;
2687                 /*
2688                  *
2689                  * Note - this code used to test ss_flags incorrectly;
2690                  *        old code may have been written using ss_flags==0
2691                  *        to mean ss_flags==SS_ONSTACK (as this was the only
2692                  *        way that worked) - this fix preserves that older
2693                  *        mechanism.
2694                  */
2695                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2696                         goto out;
2697
2698                 if (ss_flags == SS_DISABLE) {
2699                         ss_size = 0;
2700                         ss_sp = NULL;
2701                 } else {
2702                         error = -ENOMEM;
2703                         if (ss_size < MINSIGSTKSZ)
2704                                 goto out;
2705                 }
2706
2707                 current->sas_ss_sp = (unsigned long) ss_sp;
2708                 current->sas_ss_size = ss_size;
2709         }
2710
2711         error = 0;
2712         if (uoss) {
2713                 error = -EFAULT;
2714                 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2715                         goto out;
2716                 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2717                         __put_user(oss.ss_size, &uoss->ss_size) |
2718                         __put_user(oss.ss_flags, &uoss->ss_flags);
2719         }
2720
2721 out:
2722         return error;
2723 }
2724
2725 #ifdef __ARCH_WANT_SYS_SIGPENDING
2726
2727 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2728 {
2729         return do_sigpending(set, sizeof(*set));
2730 }
2731
2732 #endif
2733
2734 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2735 /* Some platforms have their own version with special arguments; others
2736    support only sys_rt_sigprocmask.  */
2737
2738 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2739                 old_sigset_t __user *, oset)
2740 {
2741         int error;
2742         old_sigset_t old_set, new_set;
2743
2744         if (set) {
2745                 error = -EFAULT;
2746                 if (copy_from_user(&new_set, set, sizeof(*set)))
2747                         goto out;
2748                 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2749
2750                 spin_lock_irq(&current->sighand->siglock);
2751                 old_set = current->blocked.sig[0];
2752
2753                 error = 0;
2754                 switch (how) {
2755                 default:
2756                         error = -EINVAL;
2757                         break;
2758                 case SIG_BLOCK:
2759                         sigaddsetmask(&current->blocked, new_set);
2760                         break;
2761                 case SIG_UNBLOCK:
2762                         sigdelsetmask(&current->blocked, new_set);
2763                         break;
2764                 case SIG_SETMASK:
2765                         current->blocked.sig[0] = new_set;
2766                         break;
2767                 }
2768
2769                 recalc_sigpending();
2770                 spin_unlock_irq(&current->sighand->siglock);
2771                 if (error)
2772                         goto out;
2773                 if (oset)
2774                         goto set_old;
2775         } else if (oset) {
2776                 old_set = current->blocked.sig[0];
2777         set_old:
2778                 error = -EFAULT;
2779                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2780                         goto out;
2781         }
2782         error = 0;
2783 out:
2784         return error;
2785 }
2786 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2787
2788 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2789 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2790                 const struct sigaction __user *, act,
2791                 struct sigaction __user *, oact,
2792                 size_t, sigsetsize)
2793 {
2794         struct k_sigaction new_sa, old_sa;
2795         int ret = -EINVAL;
2796
2797         /* XXX: Don't preclude handling different sized sigset_t's.  */
2798         if (sigsetsize != sizeof(sigset_t))
2799                 goto out;
2800
2801         if (act) {
2802                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2803                         return -EFAULT;
2804         }
2805
2806         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2807
2808         if (!ret && oact) {
2809                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2810                         return -EFAULT;
2811         }
2812 out:
2813         return ret;
2814 }
2815 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2816
2817 #ifdef __ARCH_WANT_SYS_SGETMASK
2818
2819 /*
2820  * For backwards compatibility.  Functionality superseded by sigprocmask.
2821  */
2822 SYSCALL_DEFINE0(sgetmask)
2823 {
2824         /* SMP safe */
2825         return current->blocked.sig[0];
2826 }
2827
2828 SYSCALL_DEFINE1(ssetmask, int, newmask)
2829 {
2830         int old;
2831
2832         spin_lock_irq(&current->sighand->siglock);
2833         old = current->blocked.sig[0];
2834
2835         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2836                                                   sigmask(SIGSTOP)));
2837         recalc_sigpending();
2838         spin_unlock_irq(&current->sighand->siglock);
2839
2840         return old;
2841 }
2842 #endif /* __ARCH_WANT_SYS_SGETMASK */
2843
2844 #ifdef __ARCH_WANT_SYS_SIGNAL
2845 /*
2846  * For backwards compatibility.  Functionality superseded by sigaction.
2847  */
2848 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2849 {
2850         struct k_sigaction new_sa, old_sa;
2851         int ret;
2852
2853         new_sa.sa.sa_handler = handler;
2854         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2855         sigemptyset(&new_sa.sa.sa_mask);
2856
2857         ret = do_sigaction(sig, &new_sa, &old_sa);
2858
2859         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2860 }
2861 #endif /* __ARCH_WANT_SYS_SIGNAL */
2862
2863 #ifdef __ARCH_WANT_SYS_PAUSE
2864
2865 SYSCALL_DEFINE0(pause)
2866 {
2867         current->state = TASK_INTERRUPTIBLE;
2868         schedule();
2869         return -ERESTARTNOHAND;
2870 }
2871
2872 #endif
2873
2874 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2875 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2876 {
2877         sigset_t newset;
2878
2879         /* XXX: Don't preclude handling different sized sigset_t's.  */
2880         if (sigsetsize != sizeof(sigset_t))
2881                 return -EINVAL;
2882
2883         if (copy_from_user(&newset, unewset, sizeof(newset)))
2884                 return -EFAULT;
2885         sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2886
2887         spin_lock_irq(&current->sighand->siglock);
2888         current->saved_sigmask = current->blocked;
2889         current->blocked = newset;
2890         recalc_sigpending();
2891         spin_unlock_irq(&current->sighand->siglock);
2892
2893         current->state = TASK_INTERRUPTIBLE;
2894         schedule();
2895         set_restore_sigmask();
2896         return -ERESTARTNOHAND;
2897 }
2898 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2899
2900 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2901 {
2902         return NULL;
2903 }
2904
2905 void __init signals_init(void)
2906 {
2907         sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2908 }
2909
2910 #ifdef CONFIG_KGDB_KDB
2911 #include <linux/kdb.h>
2912 /*
2913  * kdb_send_sig_info - Allows kdb to send signals without exposing
2914  * signal internals.  This function checks if the required locks are
2915  * available before calling the main signal code, to avoid kdb
2916  * deadlocks.
2917  */
2918 void
2919 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2920 {
2921         static struct task_struct *kdb_prev_t;
2922         int sig, new_t;
2923         if (!spin_trylock(&t->sighand->siglock)) {
2924                 kdb_printf("Can't do kill command now.\n"
2925                            "The sigmask lock is held somewhere else in "
2926                            "kernel, try again later\n");
2927                 return;
2928         }
2929         spin_unlock(&t->sighand->siglock);
2930         new_t = kdb_prev_t != t;
2931         kdb_prev_t = t;
2932         if (t->state != TASK_RUNNING && new_t) {
2933                 kdb_printf("Process is not RUNNING, sending a signal from "
2934                            "kdb risks deadlock\n"
2935                            "on the run queue locks. "
2936                            "The signal has _not_ been sent.\n"
2937                            "Reissue the kill command if you want to risk "
2938                            "the deadlock.\n");
2939                 return;
2940         }
2941         sig = info->si_signo;
2942         if (send_sig_info(sig, info, t))
2943                 kdb_printf("Fail to deliver Signal %d to process %d.\n",
2944                            sig, t->pid);
2945         else
2946                 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
2947 }
2948 #endif  /* CONFIG_KGDB_KDB */