[PATCH] cleanup the usage of SEND_SIG_xxx constants
[linux-3.10.git] / kernel / signal.c
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *              Changes to use preallocated sigqueue structures
10  *              to allow signals to be sent reliably.
11  */
12
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <asm/param.h>
29 #include <asm/uaccess.h>
30 #include <asm/unistd.h>
31 #include <asm/siginfo.h>
32
33 /*
34  * SLAB caches for signal bits.
35  */
36
37 static kmem_cache_t *sigqueue_cachep;
38
39 /*
40  * In POSIX a signal is sent either to a specific thread (Linux task)
41  * or to the process as a whole (Linux thread group).  How the signal
42  * is sent determines whether it's to one thread or the whole group,
43  * which determines which signal mask(s) are involved in blocking it
44  * from being delivered until later.  When the signal is delivered,
45  * either it's caught or ignored by a user handler or it has a default
46  * effect that applies to the whole thread group (POSIX process).
47  *
48  * The possible effects an unblocked signal set to SIG_DFL can have are:
49  *   ignore     - Nothing Happens
50  *   terminate  - kill the process, i.e. all threads in the group,
51  *                similar to exit_group.  The group leader (only) reports
52  *                WIFSIGNALED status to its parent.
53  *   coredump   - write a core dump file describing all threads using
54  *                the same mm and then kill all those threads
55  *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
56  *
57  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
58  * Other signals when not blocked and set to SIG_DFL behaves as follows.
59  * The job control signals also have other special effects.
60  *
61  *      +--------------------+------------------+
62  *      |  POSIX signal      |  default action  |
63  *      +--------------------+------------------+
64  *      |  SIGHUP            |  terminate       |
65  *      |  SIGINT            |  terminate       |
66  *      |  SIGQUIT           |  coredump        |
67  *      |  SIGILL            |  coredump        |
68  *      |  SIGTRAP           |  coredump        |
69  *      |  SIGABRT/SIGIOT    |  coredump        |
70  *      |  SIGBUS            |  coredump        |
71  *      |  SIGFPE            |  coredump        |
72  *      |  SIGKILL           |  terminate(+)    |
73  *      |  SIGUSR1           |  terminate       |
74  *      |  SIGSEGV           |  coredump        |
75  *      |  SIGUSR2           |  terminate       |
76  *      |  SIGPIPE           |  terminate       |
77  *      |  SIGALRM           |  terminate       |
78  *      |  SIGTERM           |  terminate       |
79  *      |  SIGCHLD           |  ignore          |
80  *      |  SIGCONT           |  ignore(*)       |
81  *      |  SIGSTOP           |  stop(*)(+)      |
82  *      |  SIGTSTP           |  stop(*)         |
83  *      |  SIGTTIN           |  stop(*)         |
84  *      |  SIGTTOU           |  stop(*)         |
85  *      |  SIGURG            |  ignore          |
86  *      |  SIGXCPU           |  coredump        |
87  *      |  SIGXFSZ           |  coredump        |
88  *      |  SIGVTALRM         |  terminate       |
89  *      |  SIGPROF           |  terminate       |
90  *      |  SIGPOLL/SIGIO     |  terminate       |
91  *      |  SIGSYS/SIGUNUSED  |  coredump        |
92  *      |  SIGSTKFLT         |  terminate       |
93  *      |  SIGWINCH          |  ignore          |
94  *      |  SIGPWR            |  terminate       |
95  *      |  SIGRTMIN-SIGRTMAX |  terminate       |
96  *      +--------------------+------------------+
97  *      |  non-POSIX signal  |  default action  |
98  *      +--------------------+------------------+
99  *      |  SIGEMT            |  coredump        |
100  *      +--------------------+------------------+
101  *
102  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103  * (*) Special job control effects:
104  * When SIGCONT is sent, it resumes the process (all threads in the group)
105  * from TASK_STOPPED state and also clears any pending/queued stop signals
106  * (any of those marked with "stop(*)").  This happens regardless of blocking,
107  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
108  * any pending/queued SIGCONT signals; this happens regardless of blocking,
109  * catching, or ignored the stop signal, though (except for SIGSTOP) the
110  * default action of stopping the process may happen later or never.
111  */
112
/*
 * Bitmask classification of signals by their SIG_DFL behavior.
 * M_SIGEMT folds the optional SIGEMT into the coredump mask on
 * architectures that define it.  (M() is used here before it is
 * defined below; that is fine for macros, which expand at use.)
 */
#ifdef SIGEMT
#define M_SIGEMT        M(SIGEMT)
#else
#define M_SIGEMT        0
#endif

/*
 * M(sig) is the single-bit mask for @sig; use a 64-bit constant when
 * the classic signals do not fit in one machine word.
 * T(sig, mask) tests whether @sig's bit is set in @mask.
 */
#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

/* Signals whose action can never be overridden from user space. */
#define SIG_KERNEL_ONLY_MASK (\
        M(SIGKILL)   |  M(SIGSTOP)                                   )

/* The job-control stop signals. */
#define SIG_KERNEL_STOP_MASK (\
        M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

/* Signals whose default action writes a core dump. */
#define SIG_KERNEL_COREDUMP_MASK (\
        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

/* Signals whose default action is to do nothing. */
#define SIG_KERNEL_IGNORE_MASK (\
        M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

/*
 * Per-class predicates.  All return false for realtime signals, whose
 * default action is always "terminate".
 */
#define sig_kernel_only(sig) \
                (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
                (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
                (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
                (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))

/* True when @t has installed a real handler (neither default nor ignore). */
#define sig_user_defined(t, signr) \
        (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
         ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

/* True when @signr left at SIG_DFL would kill the process. */
#define sig_fatal(t, signr) \
        (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
         (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
157 static int sig_ignored(struct task_struct *t, int sig)
158 {
159         void __user * handler;
160
161         /*
162          * Tracers always want to know about signals..
163          */
164         if (t->ptrace & PT_PTRACED)
165                 return 0;
166
167         /*
168          * Blocked signals are never ignored, since the
169          * signal handler may change by the time it is
170          * unblocked.
171          */
172         if (sigismember(&t->blocked, sig))
173                 return 0;
174
175         /* Is it explicitly or implicitly ignored? */
176         handler = t->sighand->action[sig-1].sa.sa_handler;
177         return   handler == SIG_IGN ||
178                 (handler == SIG_DFL && sig_kernel_ignore(sig));
179 }
180
181 /*
182  * Re-calculate pending state from the set of locally pending
183  * signals, globally pending signals, and blocked signals.
184  */
185 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
186 {
187         unsigned long ready;
188         long i;
189
190         switch (_NSIG_WORDS) {
191         default:
192                 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
193                         ready |= signal->sig[i] &~ blocked->sig[i];
194                 break;
195
196         case 4: ready  = signal->sig[3] &~ blocked->sig[3];
197                 ready |= signal->sig[2] &~ blocked->sig[2];
198                 ready |= signal->sig[1] &~ blocked->sig[1];
199                 ready |= signal->sig[0] &~ blocked->sig[0];
200                 break;
201
202         case 2: ready  = signal->sig[1] &~ blocked->sig[1];
203                 ready |= signal->sig[0] &~ blocked->sig[0];
204                 break;
205
206         case 1: ready  = signal->sig[0] &~ blocked->sig[0];
207         }
208         return ready != 0;
209 }
210
/* Test both the private and the shared pending set against @b. */
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

/*
 * Recompute TIF_SIGPENDING for @t: the flag is set when a group stop
 * is in progress, the task is being frozen, or an unblocked signal is
 * pending on either the per-thread or the shared (thread-group) queue;
 * otherwise it is cleared.
 * NOTE(review): callers appear to hold t->sighand->siglock — confirm.
 */
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
        if (t->signal->group_stop_count > 0 ||
            (freezing(t)) ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked))
                set_tsk_thread_flag(t, TIF_SIGPENDING);
        else
                clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

/* Convenience wrapper: recompute the pending flag for current. */
void recalc_sigpending(void)
{
        recalc_sigpending_tsk(current);
}
228
229 /* Given the mask, find the first available signal that should be serviced. */
230
231 static int
232 next_signal(struct sigpending *pending, sigset_t *mask)
233 {
234         unsigned long i, *s, *m, x;
235         int sig = 0;
236         
237         s = pending->signal.sig;
238         m = mask->sig;
239         switch (_NSIG_WORDS) {
240         default:
241                 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
242                         if ((x = *s &~ *m) != 0) {
243                                 sig = ffz(~x) + i*_NSIG_BPW + 1;
244                                 break;
245                         }
246                 break;
247
248         case 2: if ((x = s[0] &~ m[0]) != 0)
249                         sig = 1;
250                 else if ((x = s[1] &~ m[1]) != 0)
251                         sig = _NSIG_BPW + 1;
252                 else
253                         break;
254                 sig += ffz(~x);
255                 break;
256
257         case 1: if ((x = *s &~ *m) != 0)
258                         sig = ffz(~x) + 1;
259                 break;
260         }
261         
262         return sig;
263 }
264
265 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
266                                          int override_rlimit)
267 {
268         struct sigqueue *q = NULL;
269
270         atomic_inc(&t->user->sigpending);
271         if (override_rlimit ||
272             atomic_read(&t->user->sigpending) <=
273                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
274                 q = kmem_cache_alloc(sigqueue_cachep, flags);
275         if (unlikely(q == NULL)) {
276                 atomic_dec(&t->user->sigpending);
277         } else {
278                 INIT_LIST_HEAD(&q->list);
279                 q->flags = 0;
280                 q->user = get_uid(t->user);
281         }
282         return(q);
283 }
284
285 static inline void __sigqueue_free(struct sigqueue *q)
286 {
287         if (q->flags & SIGQUEUE_PREALLOC)
288                 return;
289         atomic_dec(&q->user->sigpending);
290         free_uid(q->user);
291         kmem_cache_free(sigqueue_cachep, q);
292 }
293
294 static void flush_sigqueue(struct sigpending *queue)
295 {
296         struct sigqueue *q;
297
298         sigemptyset(&queue->signal);
299         while (!list_empty(&queue->list)) {
300                 q = list_entry(queue->list.next, struct sigqueue , list);
301                 list_del_init(&q->list);
302                 __sigqueue_free(q);
303         }
304 }
305
/*
 * Flush all pending signals for a task.
 *
 * Under siglock: clears TIF_SIGPENDING, then discards everything on
 * both the per-thread and the shared (thread-group) pending queues.
 */

void
flush_signals(struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&t->sighand->siglock, flags);
        clear_tsk_thread_flag(t,TIF_SIGPENDING);
        flush_sigqueue(&t->pending);
        flush_sigqueue(&t->signal->shared_pending);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
321
/*
 * Detach @tsk from its signal-handler table and drop one reference;
 * the table is freed when the last sharing thread detaches.
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
        struct sighand_struct * sighand = tsk->sighand;

        /* Ok, we're done with the signal handlers */
        tsk->sighand = NULL;
        /* Last reference gone: release the handler table itself. */
        if (atomic_dec_and_test(&sighand->count))
                kmem_cache_free(sighand_cachep, sighand);
}
334
/* Locked wrapper: run __exit_sighand() under the tasklist write lock. */
void exit_sighand(struct task_struct *tsk)
{
        write_lock_irq(&tasklist_lock);
        __exit_sighand(tsk);
        write_unlock_irq(&tasklist_lock);
}
341
/*
 * Detach @tsk from its signal_struct on thread exit.  If this was the
 * last thread in the group, tear the signal_struct down; otherwise
 * fold this thread's accounting into the group totals and leave the
 * struct for the survivors.
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
        struct signal_struct * sig = tsk->signal;
        struct sighand_struct * sighand = tsk->sighand;

        if (!sig)
                BUG();
        if (!atomic_read(&sig->count))
                BUG();
        spin_lock(&sighand->siglock);
        posix_cpu_timers_exit(tsk);
        if (atomic_dec_and_test(&sig->count)) {
                /* Last thread: we now own the signal_struct outright. */
                posix_cpu_timers_exit_group(tsk);
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                tsk->signal = NULL;
                spin_unlock(&sighand->siglock);
                flush_sigqueue(&sig->shared_pending);
        } else {
                /*
                 * If there is any task waiting for the group exit
                 * then notify it:
                 */
                if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
                        wake_up_process(sig->group_exit_task);
                        sig->group_exit_task = NULL;
                }
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                tsk->signal = NULL;
                /*
                 * Accumulate here the counters for all threads but the
                 * group leader as they die, so they can be added into
                 * the process-wide totals when those are taken.
                 * The group leader stays around as a zombie as long
                 * as there are other threads.  When it gets reaped,
                 * the exit.c code will add its counts into these totals.
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
                sig->utime = cputime_add(sig->utime, tsk->utime);
                sig->stime = cputime_add(sig->stime, tsk->stime);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
                sig->sched_time += tsk->sched_time;
                spin_unlock(&sighand->siglock);
                sig = NULL;     /* Marker for below.  */
        }
        /* This thread's private pending signals die with it. */
        clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
        flush_sigqueue(&tsk->pending);
        if (sig) {
                /*
                 * We are cleaning up the signal_struct here.
                 */
                exit_thread_group_keys(sig);
                kmem_cache_free(signal_cachep, sig);
        }
}
405
/*
 * Thread-exit entry point: account one fewer live thread in the group,
 * then detach from the signal_struct under the tasklist write lock.
 */
void exit_signal(struct task_struct *tsk)
{
        atomic_dec(&tsk->signal->live);

        write_lock_irq(&tasklist_lock);
        __exit_signal(tsk);
        write_unlock_irq(&tasklist_lock);
}
414
415 /*
416  * Flush all handlers for a task.
417  */
418
419 void
420 flush_signal_handlers(struct task_struct *t, int force_default)
421 {
422         int i;
423         struct k_sigaction *ka = &t->sighand->action[0];
424         for (i = _NSIG ; i != 0 ; i--) {
425                 if (force_default || ka->sa.sa_handler != SIG_IGN)
426                         ka->sa.sa_handler = SIG_DFL;
427                 ka->sa.sa_flags = 0;
428                 sigemptyset(&ka->sa.sa_mask);
429                 ka++;
430         }
431 }
432
433
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
        unsigned long flags;

        /* Install mask/data before the notifier itself, under siglock. */
        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier_mask = mask;
        current->notifier_data = priv;
        current->notifier = notifier;
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
453
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
        unsigned long flags;

        /*
         * Clear the notifier and re-evaluate TIF_SIGPENDING, since
         * signals held back by the notifier may now be deliverable.
         * NOTE(review): notifier_mask is left pointing at the caller's
         * old sigset; it is only consulted when notifier is non-NULL.
         */
        spin_lock_irqsave(&current->sighand->siglock, flags);
        current->notifier = NULL;
        current->notifier_data = NULL;
        recalc_sigpending();
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
467
/*
 * Pull one instance of @sig off @list and fill @info with its siginfo.
 * Returns 0 when @sig is not pending on this queue at all, 1 when the
 * info was collected (or synthesized for a fast-pathed/overflowed
 * signal).  The pending bit stays set if another instance of the same
 * signal remains queued.
 */
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
        struct sigqueue *q, *first = NULL;
        int still_pending = 0;

        if (unlikely(!sigismember(&list->signal, sig)))
                return 0;

        /*
         * Collect the siginfo appropriate to this signal.  Check if
         * there is another siginfo for the same signal.
        */
        list_for_each_entry(q, &list->list, list) {
                if (q->info.si_signo == sig) {
                        if (first) {
                                still_pending = 1;
                                break;
                        }
                        first = q;
                }
        }
        if (first) {
                /* Dequeue the first match and hand its info back. */
                list_del_init(&first->list);
                copy_siginfo(info, &first->info);
                __sigqueue_free(first);
                if (!still_pending)
                        sigdelset(&list->signal, sig);
        } else {

                /* Ok, it wasn't in the queue.  This must be
                   a fast-pathed signal or we must have been
                   out of queue space.  So zero out the info.
                 */
                sigdelset(&list->signal, sig);
                info->si_signo = sig;
                info->si_errno = 0;
                info->si_code = 0;
                info->si_pid = 0;
                info->si_uid = 0;
        }
        return 1;
}
510
/*
 * Pick the next deliverable signal from @pending (honoring @mask),
 * collect its siginfo into @info and return its number, or 0 if
 * nothing was dequeued.  Also gives any installed block_all_signals()
 * notifier a chance to suppress delivery.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        siginfo_t *info)
{
        int sig = 0;

        /* SIGKILL must have priority, otherwise it is quite easy
         * to create an unkillable process, sending sig < SIGKILL
         * to self */
        if (unlikely(sigismember(&pending->signal, SIGKILL))) {
                if (!sigismember(mask, SIGKILL))
                        sig = SIGKILL;
        }

        if (likely(!sig))
                sig = next_signal(pending, mask);
        if (sig) {
                /* Let a driver's notifier veto signals in its mask. */
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
                                if (!(current->notifier)(current->notifier_data)) {
                                        clear_thread_flag(TIF_SIGPENDING);
                                        return 0;
                                }
                        }
                }

                if (!collect_signal(sig, pending, info))
                        sig = 0;
                                
        }
        /* The pending set changed (or may have); refresh the flag. */
        recalc_sigpending();

        return sig;
}
544
/*
 * Dequeue a signal and return the element to the caller, which is 
 * expected to free it.
 *
 * Tries the per-thread queue first, then the shared (thread-group)
 * queue.  Returns the signal number, or 0 if nothing was pending.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
        int signr = __dequeue_signal(&tsk->pending, mask, info);
        if (!signr)
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
        if (signr && unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
                 * stop signal it is about to process is no longer in the
                 * pending bitmasks, but must still be cleared by a SIGCONT
                 * (and overruled by a SIGKILL).  So those cases clear this
                 * shared flag after we've set it.  Note that this flag may
                 * remain set after the signal we return is ignored or
                 * handled.  That doesn't matter because its only purpose
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
                if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
                        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ( signr &&
             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
             info->si_sys_private){
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
                 * irqs disabled here, since the posix-timers code is
                 * about to disable them again anyway.
                 */
                spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
                spin_lock(&tsk->sighand->siglock);
        }
        return signr;
}
588
/*
 * Tell a process that it has a new active signal..
 *
 * @resume nonzero means a fatal wakeup: stopped/traced tasks are
 * woken too, not just interruptible sleepers.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
        unsigned int mask;

        set_tsk_thread_flag(t, TIF_SIGPENDING);

        /*
         * For SIGKILL, we want to wake it up in the stopped/traced case.
         * We don't check t->state here because there is a race with it
         * executing another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
                mask |= TASK_STOPPED | TASK_TRACED;
        /* If the task is already running elsewhere, kick that CPU so
         * it notices TIF_SIGPENDING. */
        if (!wake_up_state(t, mask))
                kick_process(t);
}
619
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
        struct sigqueue *q, *n;

        if (!sigtestsetmask(&s->signal, mask))
                return 0;

        sigdelsetmask(&s->signal, mask);
        /* Drop every queued entry whose signal is covered by @mask.
         * (Only sig < SIGRTMIN is checked; @mask is a single-word
         * legacy-signal mask.) */
        list_for_each_entry_safe(q, n, &s->list, list) {
                if (q->info.si_signo < SIGRTMIN &&
                    (mask & sigmask(q->info.si_signo))) {
                        list_del_init(&q->list);
                        __sigqueue_free(q);
                }
        }
        return 1;
}
643
/*
 * Bad permissions for sending the signal
 *
 * Returns 0 when the caller may send @sig to @t, -EINVAL for a bad
 * signal number, -EPERM (or the security module's error) otherwise.
 */
static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
{
        int error = -EINVAL;
        if (!valid_signal(sig))
                return error;
        error = -EPERM;
        /*
         * User-originated signals need uid agreement (the `a ^ b`
         * terms are nonzero exactly when a != b), unless it's SIGCONT
         * within the same session, or the sender has CAP_KILL.
         */
        if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
            && ((sig != SIGCONT) ||
                (current->signal->session != t->signal->session))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL))
                return error;

        error = security_task_kill(t, info, sig);
        if (!error)
                audit_signal_info(sig, t); /* Let audit system see the signal */
        return error;
}
667
668 /* forward decl */
669 static void do_notify_parent_cldstop(struct task_struct *tsk,
670                                      int to_self,
671                                      int why);
672
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * NOTE(review): runs under p->sighand->siglock (it is dropped and
 * retaken around the parent notifications below) — confirm at callers.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
        struct task_struct *t;

        if (p->signal->flags & SIGNAL_GROUP_EXIT)
                /*
                 * The process is in the middle of dying already.
                 */
                return;

        if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
                rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
                /* Walk every thread in the group. */
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
                        t = next_thread(t);
                } while (t != p);
        } else if (sig == SIGCONT) {
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
                if (unlikely(p->signal->group_stop_count > 0)) {
                        /*
                         * There was a group stop in progress.  We'll
                         * pretend it finished before we got here.  We are
                         * obliged to report it to the parent: if the
                         * SIGSTOP happened "after" this SIGCONT, then it
                         * would have cleared this pending SIGCONT.  If it
                         * happened "before" this SIGCONT, then the parent
                         * got the SIGCHLD about the stop finishing before
                         * the continue happened.  We do the notification
                         * now, and it's as if the stop had finished and
                         * the SIGCHLD was pending on entry to this kill.
                         */
                        p->signal->group_stop_count = 0;
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        spin_unlock(&p->sighand->siglock);
                        do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
                        spin_lock(&p->sighand->siglock);
                }
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
                /* Walk every thread: purge stop signals and wake it. */
                t = p;
                do {
                        unsigned int state;
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                        
                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
                         * we post the signal, in case it was the only
                         * thread eligible to run the signal handler--then
                         * it must not do anything between resuming and
                         * running the handler.  With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
                         * the pending signal. 
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
                         */
                        state = TASK_STOPPED;
                        if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
                                set_tsk_thread_flag(t, TIF_SIGPENDING);
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);

                        t = next_thread(t);
                } while (t != p);

                if (p->signal->flags & SIGNAL_STOP_STOPPED) {
                        /*
                         * We were in fact stopped, and are now continued.
                         * Notify the parent with CLD_CONTINUED.
                         */
                        p->signal->flags = SIGNAL_STOP_CONTINUED;
                        p->signal->group_exit_code = 0;
                        spin_unlock(&p->sighand->siglock);
                        do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
                        spin_lock(&p->sighand->siglock);
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue.  Clear that too.
                         */
                        p->signal->flags = 0;
                }
        } else if (sig == SIGKILL) {
                /*
                 * Make sure that any pending stop signal already dequeued
                 * is undone by the wakeup for SIGKILL.
                 */
                p->signal->flags = 0;
        }
}
780
/*
 * Queue @sig on the pending set @signals belonging to task @t.
 *
 * A sigqueue entry is allocated so the siginfo can travel with the
 * signal when possible; if the allocation fails we still set the
 * pending bit below so at least one instance gets delivered (kill()
 * is not allowed to fail with EAGAIN when low on memory).
 *
 * Returns 0 on success, -EAGAIN for an unqueueable rt signal, or
 * info->si_sys_private for a dropped SI_TIMER signal.  Caller must
 * hold the appropriate siglock.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		/* Fill the queued siginfo according to the sender's identity. */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			/* Plain user-style signal: attribute it to current. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			/* Kernel-internal signal: no originating task. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (info->si_code == SI_TIMER)
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
845
/*
 * True if SIG is a legacy (non-real-time) signal already pending on
 * SIGPTR; such signals are delivered at most once, never re-queued.
 */
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
848
849
850 static int
851 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
852 {
853         int ret = 0;
854
855         if (!irqs_disabled())
856                 BUG();
857         assert_spin_locked(&t->sighand->siglock);
858
859         if (!is_si_special(info) && (info->si_code == SI_TIMER))
860                 /*
861                  * Set up a return to indicate that we dropped the signal.
862                  */
863                 ret = info->si_sys_private;
864
865         /* Short-circuit ignored signals.  */
866         if (sig_ignored(t, sig))
867                 goto out;
868
869         /* Support queueing exactly one non-rt signal, so that we
870            can get more detailed information about the cause of
871            the signal. */
872         if (LEGACY_QUEUE(&t->pending, sig))
873                 goto out;
874
875         ret = send_signal(sig, info, t, &t->pending);
876         if (!ret && !sigismember(&t->blocked, sig))
877                 signal_wake_up(t, sig == SIGKILL);
878 out:
879         return ret;
880 }
881
882 /*
883  * Force a signal that the process can't ignore: if necessary
884  * we unblock the signal and change any SIG_IGN to SIG_DFL.
885  */
886
887 int
888 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
889 {
890         unsigned long int flags;
891         int ret;
892
893         spin_lock_irqsave(&t->sighand->siglock, flags);
894         if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
895                 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
896                 sigdelset(&t->blocked, sig);
897                 recalc_sigpending_tsk(t);
898         }
899         ret = specific_send_sig_info(sig, info, t);
900         spin_unlock_irqrestore(&t->sighand->siglock, flags);
901
902         return ret;
903 }
904
/*
 * Like force_sig_info() but always sends a no-info forced signal
 * (SEND_SIG_FORCED) to thread @t: SIG_IGN is reset to SIG_DFL and the
 * signal is unconditionally unblocked before delivery.
 */
void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, SEND_SIG_FORCED, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
918
919 /*
920  * Test if P wants to take SIG.  After we've checked all threads with this,
921  * it's equivalent to finding no threads not blocking SIG.  Any threads not
922  * blocking SIG were ruled out because they are not running and already
923  * have pending signals.  Such threads will dequeue from the shared queue
924  * as soon as they're available, so putting the signal on the shared queue
925  * will be equivalent to sending it to one such thread.
926  */
927 static inline int wants_signal(int sig, struct task_struct *p)
928 {
929         if (sigismember(&p->blocked, sig))
930                 return 0;
931         if (p->flags & PF_EXITING)
932                 return 0;
933         if (sig == SIGKILL)
934                 return 1;
935         if (p->state & (TASK_STOPPED | TASK_TRACED))
936                 return 0;
937         return task_curr(p) || !signal_pending(p);
938 }
939
/*
 * Complete delivery of group-wide signal @sig that is already sitting
 * on @p's shared-pending queue: pick a thread to take it, and if the
 * signal is group-fatal, start tearing the whole thread group down
 * immediately.  Caller holds p->sighand->siglock.
 */
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target round-robins delivery across the group.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				/* SIGKILL every thread; wake even stopped ones. */
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
1042
/*
 * Deliver @sig with @info to the thread group of @p via the shared
 * pending queue.  Caller must hold p->sighand->siglock.
 *
 * Returns 0 on success (or info->si_sys_private for a dropped
 * SI_TIMER signal), or the error from send_signal().
 */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	/* SIGCONT/stop signals have group-wide side effects; apply them first. */
	handle_stop_signal(sig, p);

	if (!is_si_special(info) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
1077
1078 /*
1079  * Nuke all other threads in the group.
1080  */
1081 void zap_other_threads(struct task_struct *p)
1082 {
1083         struct task_struct *t;
1084
1085         p->signal->flags = SIGNAL_GROUP_EXIT;
1086         p->signal->group_stop_count = 0;
1087
1088         if (thread_group_empty(p))
1089                 return;
1090
1091         for (t = next_thread(p); t != p; t = next_thread(t)) {
1092                 /*
1093                  * Don't bother with already dead threads
1094                  */
1095                 if (t->exit_state)
1096                         continue;
1097
1098                 /*
1099                  * We don't want to notify the parent, since we are
1100                  * killed as part of a thread group due to another
1101                  * thread doing an execve() or similar. So set the
1102                  * exit signal to -1 to allow immediate reaping of
1103                  * the process.  But don't detach the thread group
1104                  * leader.
1105                  */
1106                 if (t != p->group_leader)
1107                         t->exit_signal = -1;
1108
1109                 /* SIGKILL will be handled before any pending SIGSTOP */
1110                 sigaddset(&t->pending.signal, SIGKILL);
1111                 signal_wake_up(t, 1);
1112         }
1113 }
1114
1115 /*
1116  * Must be called with the tasklist_lock held for reading!
1117  */
1118 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1119 {
1120         unsigned long flags;
1121         int ret;
1122
1123         ret = check_kill_permission(sig, info, p);
1124         if (!ret && sig && p->sighand) {
1125                 spin_lock_irqsave(&p->sighand->siglock, flags);
1126                 ret = __group_send_sig_info(sig, info, p);
1127                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1128         }
1129
1130         return ret;
1131 }
1132
1133 /*
1134  * kill_pg_info() sends a signal to a process group: this is what the tty
1135  * control characters do (^C, ^Z etc)
1136  */
1137
1138 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1139 {
1140         struct task_struct *p = NULL;
1141         int retval, success;
1142
1143         if (pgrp <= 0)
1144                 return -EINVAL;
1145
1146         success = 0;
1147         retval = -ESRCH;
1148         do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1149                 int err = group_send_sig_info(sig, info, p);
1150                 success |= !err;
1151                 retval = err;
1152         } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1153         return success ? 0 : retval;
1154 }
1155
1156 int
1157 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1158 {
1159         int retval;
1160
1161         read_lock(&tasklist_lock);
1162         retval = __kill_pg_info(sig, info, pgrp);
1163         read_unlock(&tasklist_lock);
1164
1165         return retval;
1166 }
1167
1168 int
1169 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1170 {
1171         int error;
1172         struct task_struct *p;
1173
1174         read_lock(&tasklist_lock);
1175         p = find_task_by_pid(pid);
1176         error = -ESRCH;
1177         if (p)
1178                 error = group_send_sig_info(sig, info, p);
1179         read_unlock(&tasklist_lock);
1180         return error;
1181 }
1182
1183 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1184 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1185                       uid_t uid, uid_t euid)
1186 {
1187         int ret = -EINVAL;
1188         struct task_struct *p;
1189
1190         if (!valid_signal(sig))
1191                 return ret;
1192
1193         read_lock(&tasklist_lock);
1194         p = find_task_by_pid(pid);
1195         if (!p) {
1196                 ret = -ESRCH;
1197                 goto out_unlock;
1198         }
1199         if ((!info || ((unsigned long)info != 1 &&
1200                         (unsigned long)info != 2 && SI_FROMUSER(info)))
1201             && (euid != p->suid) && (euid != p->uid)
1202             && (uid != p->suid) && (uid != p->uid)) {
1203                 ret = -EPERM;
1204                 goto out_unlock;
1205         }
1206         if (sig && p->sighand) {
1207                 unsigned long flags;
1208                 spin_lock_irqsave(&p->sighand->siglock, flags);
1209                 ret = __group_send_sig_info(sig, info, p);
1210                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1211         }
1212 out_unlock:
1213         read_unlock(&tasklist_lock);
1214         return ret;
1215 }
1216 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1217
1218 /*
1219  * kill_something_info() interprets pid in interesting ways just like kill(2).
1220  *
1221  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1222  * is probably wrong.  Should make it like BSD or SYSV.
1223  */
1224
1225 static int kill_something_info(int sig, struct siginfo *info, int pid)
1226 {
1227         if (!pid) {
1228                 return kill_pg_info(sig, info, process_group(current));
1229         } else if (pid == -1) {
1230                 int retval = 0, count = 0;
1231                 struct task_struct * p;
1232
1233                 read_lock(&tasklist_lock);
1234                 for_each_process(p) {
1235                         if (p->pid > 1 && p->tgid != current->tgid) {
1236                                 int err = group_send_sig_info(sig, info, p);
1237                                 ++count;
1238                                 if (err != -EPERM)
1239                                         retval = err;
1240                         }
1241                 }
1242                 read_unlock(&tasklist_lock);
1243                 return count ? retval : -ESRCH;
1244         } else if (pid < 0) {
1245                 return kill_pg_info(sig, info, -pid);
1246         } else {
1247                 return kill_proc_info(sig, info, pid);
1248         }
1249 }
1250
1251 /*
1252  * These are for backward compatibility with the rest of the kernel source.
1253  */
1254
1255 /*
1256  * These two are the most common entry points.  They send a signal
1257  * just to the specific thread.
1258  */
/*
 * Send @sig with @info to the single thread @p.
 * Returns -EINVAL for an out-of-range signal number, otherwise the
 * result of specific_send_sig_info().
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
1285
/*
 * Map the "priv" flag of the legacy entry points (send_sig, kill_pg,
 * kill_proc) to the matching special siginfo pointer: kernel-private
 * senders get SEND_SIG_PRIV, user-style senders SEND_SIG_NOINFO.
 */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1288
/*
 * Legacy helper: send @sig to thread @p; @priv selects kernel-internal
 * vs. user-style siginfo (see __si_special).
 */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
1294
1295 /*
1296  * This is the entry point for "process-wide" signals.
1297  * They will go to an appropriate thread in the thread group.
1298  */
1299 int
1300 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1301 {
1302         int ret;
1303         read_lock(&tasklist_lock);
1304         ret = group_send_sig_info(sig, info, p);
1305         read_unlock(&tasklist_lock);
1306         return ret;
1307 }
1308
/*
 * Force-deliver kernel-internal signal @sig to thread @p; the task
 * cannot block or ignore it (see force_sig_info).
 */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
1314
1315 /*
1316  * When things go south during signal handling, we
1317  * will force a SIGSEGV. And if the signal that caused
1318  * the problem was already a SIGSEGV, we'll want to
1319  * make sure we don't even try to deliver the signal..
1320  */
1321 int
1322 force_sigsegv(int sig, struct task_struct *p)
1323 {
1324         if (sig == SIGSEGV) {
1325                 unsigned long flags;
1326                 spin_lock_irqsave(&p->sighand->siglock, flags);
1327                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1328                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1329         }
1330         force_sig(SIGSEGV, p);
1331         return 0;
1332 }
1333
/*
 * Legacy helper: signal process group @pgrp; @priv selects
 * kernel-internal vs. user-style siginfo (see __si_special).
 */
int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}
1339
/*
 * Legacy helper: signal the process identified by @pid; @priv selects
 * kernel-internal vs. user-style siginfo (see __si_special).
 */
int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
1345
1346 /*
1347  * These functions support sending signals using preallocated sigqueue
1348  * structures.  This is needed "because realtime applications cannot
1349  * afford to lose notifications of asynchronous events, like timer
1350  * expirations or I/O completions".  In the case of Posix Timers 
1351  * we allocate the sigqueue structure from the timer_create.  If this
1352  * allocation fails we are able to report the failure to the application
1353  * with an EAGAIN error.
1354  */
1355  
1356 struct sigqueue *sigqueue_alloc(void)
1357 {
1358         struct sigqueue *q;
1359
1360         if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1361                 q->flags |= SIGQUEUE_PREALLOC;
1362         return(q);
1363 }
1364
/*
 * Release a preallocated sigqueue entry obtained from sigqueue_alloc().
 * If it is still queued as a pending signal it is unlinked first, under
 * the sender's siglock, so delivery code never sees a freed entry.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		/* Re-check under the lock: delivery may have raced with us. */
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
1385
1386 int
1387 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1388 {
1389         unsigned long flags;
1390         int ret = 0;
1391
1392         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1393         read_lock(&tasklist_lock);
1394
1395         if (unlikely(p->flags & PF_EXITING)) {
1396                 ret = -1;
1397                 goto out_err;
1398         }
1399
1400         spin_lock_irqsave(&p->sighand->siglock, flags);
1401
1402         if (unlikely(!list_empty(&q->list))) {
1403                 /*
1404                  * If an SI_TIMER entry is already queue just increment
1405                  * the overrun count.
1406                  */
1407                 if (q->info.si_code != SI_TIMER)
1408                         BUG();
1409                 q->info.si_overrun++;
1410                 goto out;
1411         }
1412         /* Short-circuit ignored signals.  */
1413         if (sig_ignored(p, sig)) {
1414                 ret = 1;
1415                 goto out;
1416         }
1417
1418         list_add_tail(&q->list, &p->pending.list);
1419         sigaddset(&p->pending.signal, sig);
1420         if (!sigismember(&p->blocked, sig))
1421                 signal_wake_up(p, sig == SIGKILL);
1422
1423 out:
1424         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1425 out_err:
1426         read_unlock(&tasklist_lock);
1427
1428         return ret;
1429 }
1430
1431 int
1432 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1433 {
1434         unsigned long flags;
1435         int ret = 0;
1436
1437         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1438         read_lock(&tasklist_lock);
1439         spin_lock_irqsave(&p->sighand->siglock, flags);
1440         handle_stop_signal(sig, p);
1441
1442         /* Short-circuit ignored signals.  */
1443         if (sig_ignored(p, sig)) {
1444                 ret = 1;
1445                 goto out;
1446         }
1447
1448         if (unlikely(!list_empty(&q->list))) {
1449                 /*
1450                  * If an SI_TIMER entry is already queue just increment
1451                  * the overrun count.  Other uses should not try to
1452                  * send the signal multiple times.
1453                  */
1454                 if (q->info.si_code != SI_TIMER)
1455                         BUG();
1456                 q->info.si_overrun++;
1457                 goto out;
1458         } 
1459
1460         /*
1461          * Put this signal on the shared-pending queue.
1462          * We always use the shared queue for process-wide signals,
1463          * to avoid several races.
1464          */
1465         list_add_tail(&q->list, &p->signal->shared_pending.list);
1466         sigaddset(&p->signal->shared_pending.signal, sig);
1467
1468         __group_complete_signal(sig, p);
1469 out:
1470         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1471         read_unlock(&tasklist_lock);
1472         return(ret);
1473 }
1474
1475 /*
1476  * Wake up any threads in the parent blocked in wait* syscalls.
1477  */
1478 static inline void __wake_up_parent(struct task_struct *p,
1479                                     struct task_struct *parent)
1480 {
1481         wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1482 }
1483
1484 /*
1485  * Let a parent know about the death of a child.
1486  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1487  */
1488
1489 void do_notify_parent(struct task_struct *tsk, int sig)
1490 {
1491         struct siginfo info;
1492         unsigned long flags;
1493         struct sighand_struct *psig;
1494
1495         BUG_ON(sig == -1);
1496
1497         /* do_notify_parent_cldstop should have been called instead.  */
1498         BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1499
1500         BUG_ON(!tsk->ptrace &&
1501                (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1502
1503         info.si_signo = sig;
1504         info.si_errno = 0;
1505         info.si_pid = tsk->pid;
1506         info.si_uid = tsk->uid;
1507
1508         /* FIXME: find out whether or not this is supposed to be c*time. */
1509         info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1510                                                        tsk->signal->utime));
1511         info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1512                                                        tsk->signal->stime));
1513
1514         info.si_status = tsk->exit_code & 0x7f;
1515         if (tsk->exit_code & 0x80)
1516                 info.si_code = CLD_DUMPED;
1517         else if (tsk->exit_code & 0x7f)
1518                 info.si_code = CLD_KILLED;
1519         else {
1520                 info.si_code = CLD_EXITED;
1521                 info.si_status = tsk->exit_code >> 8;
1522         }
1523
1524         psig = tsk->parent->sighand;
1525         spin_lock_irqsave(&psig->siglock, flags);
1526         if (sig == SIGCHLD &&
1527             (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1528              (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1529                 /*
1530                  * We are exiting and our parent doesn't care.  POSIX.1
1531                  * defines special semantics for setting SIGCHLD to SIG_IGN
1532                  * or setting the SA_NOCLDWAIT flag: we should be reaped
1533                  * automatically and not left for our parent's wait4 call.
1534                  * Rather than having the parent do it as a magic kind of
1535                  * signal handler, we just set this to tell do_exit that we
1536                  * can be cleaned up without becoming a zombie.  Note that
1537                  * we still call __wake_up_parent in this case, because a
1538                  * blocked sys_wait4 might now return -ECHILD.
1539                  *
1540                  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1541                  * is implementation-defined: we do (if you don't want
1542                  * it, just use SIG_IGN instead).
1543                  */
1544                 tsk->exit_signal = -1;
1545                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1546                         sig = 0;
1547         }
1548         if (valid_signal(sig) && sig > 0)
1549                 __group_send_sig_info(sig, &info, tsk->parent);
1550         __wake_up_parent(tsk, tsk->parent);
1551         spin_unlock_irqrestore(&psig->siglock, flags);
1552 }
1553
/*
 * Notify the parent of a stop/continue/trap state change of @tsk with
 * a SIGCHLD carrying @why (CLD_STOPPED/CLD_CONTINUED/CLD_TRAPPED).
 * @to_self selects the ptrace parent (tsk->parent) vs. the real parent
 * of the group leader.  wait4 callers are woken even when SIGCHLD is
 * suppressed by SIG_IGN/SA_NOCLDSTOP.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (to_self)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1603
1604 /*
1605  * This must be called with current->sighand->siglock held.
1606  *
1607  * This should be the path for all ptrace stops.
1608  * We always set current->last_siginfo while stopped here.
1609  * That makes it a way to test a stopped process for
1610  * being ptrace-stopped vs being job-control-stopped.
1611  *
1612  * If we actually decide not to stop at all because the tracer is gone,
1613  * we leave nostop_code in current->exit_code.
1614  */
1615 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1616 {
1617         /*
1618          * If there is a group stop in progress,
1619          * we must participate in the bookkeeping.
1620          */
1621         if (current->signal->group_stop_count > 0)
1622                 --current->signal->group_stop_count;
1623
1624         current->last_siginfo = info;
1625         current->exit_code = exit_code;
1626
1627         /* Let the debugger run.  */
1628         set_current_state(TASK_TRACED);
1629         spin_unlock_irq(&current->sighand->siglock);
1630         read_lock(&tasklist_lock);
1631         if (likely(current->ptrace & PT_PTRACED) &&
1632             likely(current->parent != current->real_parent ||
1633                    !(current->ptrace & PT_ATTACHED)) &&
1634             (likely(current->parent->signal != current->signal) ||
1635              !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1636                 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1637                 read_unlock(&tasklist_lock);
1638                 schedule();
1639         } else {
1640                 /*
1641                  * By the time we got the lock, our tracer went away.
1642                  * Don't stop here.
1643                  */
1644                 read_unlock(&tasklist_lock);
1645                 set_current_state(TASK_RUNNING);
1646                 current->exit_code = nostop_code;
1647         }
1648
1649         /*
1650          * We are back.  Now reacquire the siglock before touching
1651          * last_siginfo, so that we are sure to have synchronized with
1652          * any signal-sending on another CPU that wants to examine it.
1653          */
1654         spin_lock_irq(&current->sighand->siglock);
1655         current->last_siginfo = NULL;
1656
1657         /*
1658          * Queued signals ignored us while we were stopped for tracing.
1659          * So check for any that we should take before resuming user mode.
1660          */
1661         recalc_sigpending();
1662 }
1663
1664 void ptrace_notify(int exit_code)
1665 {
1666         siginfo_t info;
1667
1668         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1669
1670         memset(&info, 0, sizeof info);
1671         info.si_signo = SIGTRAP;
1672         info.si_code = exit_code;
1673         info.si_pid = current->pid;
1674         info.si_uid = current->uid;
1675
1676         /* Let the debugger run.  */
1677         spin_lock_irq(&current->sighand->siglock);
1678         ptrace_stop(exit_code, 0, &info);
1679         spin_unlock_irq(&current->sighand->siglock);
1680 }
1681
1682 static void
1683 finish_stop(int stop_count)
1684 {
1685         int to_self;
1686
1687         /*
1688          * If there are no other threads in the group, or if there is
1689          * a group stop in progress and we are the last to stop,
1690          * report to the parent.  When ptraced, every thread reports itself.
1691          */
1692         if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1693                 to_self = 1;
1694         else if (stop_count == 0)
1695                 to_self = 0;
1696         else
1697                 goto out;
1698
1699         read_lock(&tasklist_lock);
1700         do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1701         read_unlock(&tasklist_lock);
1702
1703 out:
1704         schedule();
1705         /*
1706          * Now we don't run again until continued.
1707          */
1708         current->exit_code = 0;
1709 }
1710
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 *
 * Called with current->sighand->siglock held.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	/* -1: lone thread, no group bookkeeping; >= 0: threads still to stop */
	int stop_count = -1;

	/* Bail if a SIGCONT already cancelled the stop we dequeued.  */
	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			/* Ask every other live, not-yet-stopped thread to stop. */
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (!t->exit_state &&
				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}
1815
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 *
 * Called with current->sighand->siglock held.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		/* We are the last thread of the group to stop.  */
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
1856
/*
 * Main signal-delivery loop, run on the way back to user mode.
 *
 * Dequeues one unblocked signal at a time and decides its fate:
 * hand it to the debugger first when ptraced, run a user handler
 * (returns the signal number with *return_ka filled in), perform the
 * default stop via do_signal_stop(), or take the default fatal action
 * (do_coredump()/do_group_exit(), which does not return).
 * Returns 0 when there is nothing left to deliver.
 */
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		/* Participate in any pending group stop first.  */
		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig or group_exit? */
			signr = current->exit_code;
			if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			/* SA_ONESHOT: reset to default before first delivery. */
			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
1993
/* Signal-handling entry points exported to loadable modules.  */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
2006
2007
2008 /*
2009  * System call entry points.
2010  */
2011
2012 asmlinkage long sys_restart_syscall(void)
2013 {
2014         struct restart_block *restart = &current_thread_info()->restart_block;
2015         return restart->fn(restart);
2016 }
2017
/* Restart handler that refuses to restart: the syscall fails with -EINTR. */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2022
2023 /*
2024  * We don't need to get the kernel lock - this is all local to this
2025  * particular thread.. (and that's good, because this is _heavily_
2026  * used by various programs)
2027  */
2028
2029 /*
2030  * This is also useful for kernel threads that want to temporarily
2031  * (or permanently) block certain signals.
2032  *
2033  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2034  * interface happily blocks "unblockable" signals like SIGKILL
2035  * and friends.
2036  */
2037 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2038 {
2039         int error;
2040         sigset_t old_block;
2041
2042         spin_lock_irq(&current->sighand->siglock);
2043         old_block = current->blocked;
2044         error = 0;
2045         switch (how) {
2046         case SIG_BLOCK:
2047                 sigorsets(&current->blocked, &current->blocked, set);
2048                 break;
2049         case SIG_UNBLOCK:
2050                 signandsets(&current->blocked, &current->blocked, set);
2051                 break;
2052         case SIG_SETMASK:
2053                 current->blocked = *set;
2054                 break;
2055         default:
2056                 error = -EINVAL;
2057         }
2058         recalc_sigpending();
2059         spin_unlock_irq(&current->sighand->siglock);
2060         if (oldset)
2061                 *oldset = old_block;
2062         return error;
2063 }
2064
2065 asmlinkage long
2066 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2067 {
2068         int error = -EINVAL;
2069         sigset_t old_set, new_set;
2070
2071         /* XXX: Don't preclude handling different sized sigset_t's.  */
2072         if (sigsetsize != sizeof(sigset_t))
2073                 goto out;
2074
2075         if (set) {
2076                 error = -EFAULT;
2077                 if (copy_from_user(&new_set, set, sizeof(*set)))
2078                         goto out;
2079                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2080
2081                 error = sigprocmask(how, &new_set, &old_set);
2082                 if (error)
2083                         goto out;
2084                 if (oset)
2085                         goto set_old;
2086         } else if (oset) {
2087                 spin_lock_irq(&current->sighand->siglock);
2088                 old_set = current->blocked;
2089                 spin_unlock_irq(&current->sighand->siglock);
2090
2091         set_old:
2092                 error = -EFAULT;
2093                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2094                         goto out;
2095         }
2096         error = 0;
2097 out:
2098         return error;
2099 }
2100
2101 long do_sigpending(void __user *set, unsigned long sigsetsize)
2102 {
2103         long error = -EINVAL;
2104         sigset_t pending;
2105
2106         if (sigsetsize > sizeof(sigset_t))
2107                 goto out;
2108
2109         spin_lock_irq(&current->sighand->siglock);
2110         sigorsets(&pending, &current->pending.signal,
2111                   &current->signal->shared_pending.signal);
2112         spin_unlock_irq(&current->sighand->siglock);
2113
2114         /* Outside the lock because only this thread touches it.  */
2115         sigandsets(&pending, &current->blocked, &pending);
2116
2117         error = -EFAULT;
2118         if (!copy_to_user(set, &pending, sigsetsize))
2119                 error = 0;
2120
2121 out:
2122         return error;
2123 }       
2124
/* rt_sigpending(2): thin wrapper around do_sigpending().  */
asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
2130
2131 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2132
/*
 * Copy a kernel siginfo_t to user space field by field, so that no
 * uninitialized padding in the union leaks to user space.  Negative
 * si_code means a userspace-generated siginfo and is copied whole.
 * Returns 0 on success, -EFAULT on fault.
 */
int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	/* The high bits of si_code select which union member is live.  */
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		 err |= __put_user(from->si_tid, &to->si_tid);
		 err |= __put_user(from->si_overrun, &to->si_overrun);
		 err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}
2192
2193 #endif
2194
/*
 * sigtimedwait(2): wait for one of the signals in @uthese, optionally
 * bounded by @uts.  Returns the dequeued signal number (copying its
 * siginfo to @uinfo when non-NULL); -EAGAIN when the timeout expired,
 * -EINTR when the sleep ended early without a wanted signal.
 */
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		/* Round any nonzero user timeout up by one jiffy.  */
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			try_to_freeze();
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			/* Restore the original blocked mask.  */
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		/* Woken before expiry with no wanted signal: interrupted.  */
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
2272
2273 asmlinkage long
2274 sys_kill(int pid, int sig)
2275 {
2276         struct siginfo info;
2277
2278         info.si_signo = sig;
2279         info.si_errno = 0;
2280         info.si_code = SI_USER;
2281         info.si_pid = current->tgid;
2282         info.si_uid = current->uid;
2283
2284         return kill_something_info(sig, &info, pid);
2285 }
2286
/*
 * Common backend for tkill(2)/tgkill(2): send @sig to the single task
 * with PID @pid, optionally verifying it belongs to thread group @tgid
 * (@tgid <= 0 skips the check).  Returns 0 or a negative errno
 * (-ESRCH if no matching task, or a permission error).
 */
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
2319
2320 /**
2321  *  sys_tgkill - send signal to one specific thread
2322  *  @tgid: the thread group ID of the thread
2323  *  @pid: the PID of the thread
2324  *  @sig: signal to be sent
2325  *
2326  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2327  *  exists but it's not belonging to the target process anymore. This
2328  *  method solves the problem of threads exiting and PIDs getting reused.
2329  */
2330 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2331 {
2332         /* This is only valid for single tasks */
2333         if (pid <= 0 || tgid <= 0)
2334                 return -EINVAL;
2335
2336         return do_tkill(tgid, pid, sig);
2337 }
2338
2339 /*
2340  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2341  */
2342 asmlinkage long
2343 sys_tkill(int pid, int sig)
2344 {
2345         /* This is only valid for single tasks */
2346         if (pid <= 0)
2347                 return -EINVAL;
2348
2349         return do_tkill(0, pid, sig);
2350 }
2351
2352 asmlinkage long
2353 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2354 {
2355         siginfo_t info;
2356
2357         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2358                 return -EFAULT;
2359
2360         /* Not even root can pretend to send signals from the kernel.
2361            Nor can they impersonate a kill(), which adds source info.  */
2362         if (info.si_code >= 0)
2363                 return -EPERM;
2364         info.si_signo = sig;
2365
2366         /* POSIX.1b doesn't mention process groups.  */
2367         return kill_proc_info(sig, &info, pid);
2368 }
2369
/*
 * Backend for sigaction(2): install the new action @act for @sig
 * and/or report the old one in @oact.  Rejects invalid signals and
 * attempts to change SIGKILL/SIGSTOP (sig_kernel_only).  Per POSIX,
 * setting a disposition that ignores the signal also discards any
 * pending instances of it across the whole thread group.
 */
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			/* SIGKILL and SIGSTOP may never be masked.  */
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			/* Discard pending instances group-wide.  */
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		/* SIGKILL and SIGSTOP may never be masked.  */
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
2440
/*
 * do_sigaltstack - set and/or fetch the alternate signal stack.
 * @uss:  new stack description from userspace, or NULL to leave it unchanged
 * @uoss: where to report the old stack description, or NULL
 * @sp:   the caller's current user stack pointer, used to decide whether
 *        the task is executing on the alternate stack right now
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EPERM if the task
 * is currently running on the alternate stack, -EINVAL for unrecognized
 * ss_flags, -ENOMEM if the new stack is smaller than MINSIGSTKSZ.
 */
int 
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	/*
	 * Snapshot the current settings first so that a simultaneous
	 * update via uss does not change what we report back via uoss.
	 */
	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		/*
		 * Swapping the stack out from under ourselves while we
		 * are executing on it is not allowed.
		 */
		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 *
		 * Note - this code used to test ss_flags incorrectly
		 *        old code may have been written using ss_flags==0
		 *        to mean ss_flags==SS_ONSTACK (as this was the only
		 *        way that worked) - this fix preserves that older
		 *        mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			/* Disabling: forget the old stack entirely. */
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
2504
2505 #ifdef __ARCH_WANT_SYS_SIGPENDING
2506
2507 asmlinkage long
2508 sys_sigpending(old_sigset_t __user *set)
2509 {
2510         return do_sigpending(set, sizeof(*set));
2511 }
2512
2513 #endif
2514
2515 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2516 /* Some platforms have their own version with special arguments others
2517    support only sys_rt_sigprocmask.  */
2518
/*
 * Old-style sigprocmask(2): manipulate the first word of the caller's
 * blocked-signal mask according to @how, optionally reporting the
 * previous mask through @oset.
 *
 * Returns 0 on success, -EINVAL for an unknown @how, -EFAULT on a bad
 * user pointer.
 */
asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		/* SIGKILL and SIGSTOP may never be blocked. */
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;	/* report the mask saved under the lock */
	} else if (oset) {
		/* Fetch only: reading our own mask word needs no lock. */
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
2567 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2568
2569 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2570 asmlinkage long
2571 sys_rt_sigaction(int sig,
2572                  const struct sigaction __user *act,
2573                  struct sigaction __user *oact,
2574                  size_t sigsetsize)
2575 {
2576         struct k_sigaction new_sa, old_sa;
2577         int ret = -EINVAL;
2578
2579         /* XXX: Don't preclude handling different sized sigset_t's.  */
2580         if (sigsetsize != sizeof(sigset_t))
2581                 goto out;
2582
2583         if (act) {
2584                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2585                         return -EFAULT;
2586         }
2587
2588         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2589
2590         if (!ret && oact) {
2591                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2592                         return -EFAULT;
2593         }
2594 out:
2595         return ret;
2596 }
2597 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2598
2599 #ifdef __ARCH_WANT_SYS_SGETMASK
2600
2601 /*
2602  * For backwards compatibility.  Functionality superseded by sigprocmask.
2603  */
2604 asmlinkage long
2605 sys_sgetmask(void)
2606 {
2607         /* SMP safe */
2608         return current->blocked.sig[0];
2609 }
2610
2611 asmlinkage long
2612 sys_ssetmask(int newmask)
2613 {
2614         int old;
2615
2616         spin_lock_irq(&current->sighand->siglock);
2617         old = current->blocked.sig[0];
2618
2619         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2620                                                   sigmask(SIGSTOP)));
2621         recalc_sigpending();
2622         spin_unlock_irq(&current->sighand->siglock);
2623
2624         return old;
2625 }
#endif /* __ARCH_WANT_SYS_SGETMASK */
2627
2628 #ifdef __ARCH_WANT_SYS_SIGNAL
2629 /*
2630  * For backwards compatibility.  Functionality superseded by sigaction.
2631  */
2632 asmlinkage unsigned long
2633 sys_signal(int sig, __sighandler_t handler)
2634 {
2635         struct k_sigaction new_sa, old_sa;
2636         int ret;
2637
2638         new_sa.sa.sa_handler = handler;
2639         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2640
2641         ret = do_sigaction(sig, &new_sa, &old_sa);
2642
2643         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2644 }
2645 #endif /* __ARCH_WANT_SYS_SIGNAL */
2646
2647 #ifdef __ARCH_WANT_SYS_PAUSE
2648
/*
 * pause(2): sleep until a signal is delivered.  Returning
 * -ERESTARTNOHAND means the syscall is restarted if the signal had no
 * handler, and -EINTR is reported if a handler was invoked.
 */
asmlinkage long
sys_pause(void)
{
	/* Mark ourselves sleeping before calling schedule(). */
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
2656
2657 #endif
2658
2659 void __init signals_init(void)
2660 {
2661         sigqueue_cachep =
2662                 kmem_cache_create("sigqueue",
2663                                   sizeof(struct sigqueue),
2664                                   __alignof__(struct sigqueue),
2665                                   SLAB_PANIC, NULL, NULL);
2666 }