/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

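/*
 * Returns non-zero if "t" would silently ignore "sig": the handler is
 * SIG_IGN, or it is SIG_DFL and the kernel's default is to ignore.
 * Traced tasks and blocked signals never count as ignored here.
 */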
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

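/*
 * Re-derive TIF_SIGPENDING for "t": set whenever a group stop is in
 * progress, the task is being frozen, or an unblocked signal is pending
 * on the private or the shared queue.
 */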
98fastcall void recalc_sigpending_tsk(struct task_struct *t)
99{
100 if (t->signal->group_stop_count > 0 ||
Christoph Lameter3e1d1d22005-06-24 23:13:50 -0700101 (freezing(t)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102 PENDING(&t->pending, &t->blocked) ||
103 PENDING(&t->signal->shared_pending, &t->blocked))
104 set_tsk_thread_flag(t, TIF_SIGPENDING);
105 else
106 clear_tsk_thread_flag(t, TIF_SIGPENDING);
107}
108
109void recalc_sigpending(void)
110{
111 recalc_sigpending_tsk(current);
112}
113
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

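/*
 * Allocate a sigqueue record, charging it against the owner's
 * RLIMIT_SIGPENDING count; returns NULL once that limit is exceeded
 * unless override_rlimit is set.
 */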
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

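/*
 * Grab the next deliverable signal from "pending" that is not in *mask,
 * honouring any block_all_signals() notifier currently in effect.
 */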
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	recalc_sigpending_tsk(tsk);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ( signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will wake
	 * up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	return security_task_kill(t, info, sig, 0);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

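/*
 * Queue "sig" and its siginfo on the given pending set.  This only does
 * the queueing and accounting; waking a suitable thread is left to the
 * callers.
 */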
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

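/*
 * A non-realtime signal is only ever represented once in a pending set:
 * if the bit for "sig" is already set, a new instance is dropped.
 */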
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_tsk(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

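/*
 * Pick a thread to take a freshly queued shared signal and wake it,
 * escalating to a group-wide takedown when the signal is fatal.
 */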
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

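/*
 * Send a signal to the single process identified by "pid", taking
 * tasklist_lock only for signals that need it (sig_needs_tasklist()).
 */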
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;
	rcu_read_lock();
	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}
	rcu_read_unlock();
	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

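/*
 * Deliver a preallocated sigqueue entry to one specific thread.  Used
 * with SI_TIMER entries, so a send while the entry is still queued only
 * bumps the overrun count.
 */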
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

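/*
 * Tell the (ptrace or real) parent that a child stopped, continued or
 * was trapped, via SIGCHLD with the matching CLD_* si_code.
 */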
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

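/*
 * Decide whether the current task may enter TASK_TRACED for its tracer;
 * returns 0 when stopping would be pointless or could deadlock (e.g.
 * during a coredump that the tracer itself is part of).
 */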
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		    (current->ptrace & PT_ATTACHED)))
		return 0;

	if (unlikely(current->signal == current->parent->signal) &&
	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}


/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	try_to_freeze();
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

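/*
 * Report an event (encoded in exit_code) to the tracer as a SIGTRAP
 * ptrace stop on behalf of the current task.
 */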
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

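/*
 * Final leg of a job-control stop: notify the parent if appropriate,
 * then schedule away until we are continued.
 */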
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

1654/*
1655 * Do appropriate magic when group_stop_count > 0.
1656 * We return nonzero if we stopped, after releasing the siglock.
1657 * We return zero if we still hold the siglock and should look
1658 * for another signal without checking group_stop_count again.
1659 */
Arjan van de Ven858119e2006-01-14 13:20:43 -08001660static int handle_group_stop(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661{
1662 int stop_count;
1663
1664 if (current->signal->group_exit_task == current) {
1665 /*
1666 * Group stop is so we can do a core dump,
1667 * We are the initiating thread, so get on with it.
1668 */
1669 current->signal->group_exit_task = NULL;
1670 return 0;
1671 }
1672
1673 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1674 /*
1675 * Group stop is so another thread can do a core dump,
1676 * or else we are racing against a death signal.
1677 * Just punt the stop so we can get the next signal.
1678 */
1679 return 0;
1680
1681 /*
1682 * There is a group stop in progress. We stop
1683 * without any associated signal being in our queue.
1684 */
1685 stop_count = --current->signal->group_stop_count;
1686 if (stop_count == 0)
1687 current->signal->flags = SIGNAL_STOP_STOPPED;
1688 current->exit_code = current->signal->group_exit_code;
1689 set_current_state(TASK_STOPPED);
1690 spin_unlock_irq(&current->sighand->siglock);
1691 finish_stop(stop_count);
1692 return 1;
1693}
1694
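/*
 * get_signal_to_deliver - pick the next signal for the arch code to deliver
 *
 * Architecture signal-delivery code (do_signal() and friends) typically
 * drives this in a loop; a rough sketch of such a caller, with names
 * that vary per architecture:
 *
 *	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *	if (signr > 0)
 *		handle_signal(signr, &info, &ka, oldset, regs);
 *
 * Returns the signal number to deliver, with *return_ka filled in with
 * the action to use, or 0 when everything pending was handled here
 * (ignored, stopped on, or fatal).
 */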
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig? */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing.  */
			continue;

		/*
		 * Init of a pid space gets no signals it doesn't want from
		 * within that pid space.  It can of course get signals from
		 * its parent pid space.
		 */
		if (current == child_reaper(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_current_pgrp_orphaned()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);


/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.  (And that's good, because this is _heavily_
 * used by various programs.)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
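/*
 * For example (an illustrative sketch, not a caller in this file), a
 * kernel thread that wants to block everything except SIGKILL could do:
 *
 *	sigset_t all;
 *
 *	siginitsetinv(&all, sigmask(SIGKILL));
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */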
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is.  */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

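/*
 * sys_rt_sigtimedwait - synchronously wait for queued signals
 *
 * Userspace reaches this via sigtimedwait(2)/sigwaitinfo(2).  An
 * illustrative (untested) caller waiting up to one second for SIGUSR1
 * looks like:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	sigtimedwait(&set, &si, &ts);
 *
 * Note the sigprocmask() first: a signal that is not blocked may be
 * delivered to a handler instead of being picked up here.
 */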
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
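		/*
		 * Nothing queued yet: work out how long to sleep.  The
		 * extra (tv_sec || tv_nsec) term adds one jiffy to any
		 * nonzero timeout so we never sleep shorter than
		 * requested; a zero timespec leaves timeout at 0 and we
		 * fall through to -EAGAIN without sleeping.
		 */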
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that
			 * we'll be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

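/*
 * kill(2) entry point.  The pid argument selects the targets the usual
 * POSIX way: pid > 0 signals that one process, pid == 0 the caller's
 * process group, pid == -1 every process the caller may signal, and
 * pid < -1 the process group -pid.  kill_something_info() does the
 * actual fan-out.
 */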
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

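/*
 * Common backend for tkill() and tgkill().  A tgid <= 0 means "don't
 * check the thread group", which gives the old tkill() behaviour.
 */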
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target thread group.  This
 * method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly;
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked), so this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
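
/*
 * Userspace reaches do_sigaltstack() through sigaltstack(2); an
 * illustrative (untested) caller that installs an alternate signal
 * stack looks like:
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 * Handlers then run on that stack only when installed with SA_ONSTACK.
 */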

#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

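	/*
	 * Classic SysV signal() semantics: the handler is reset to
	 * SIG_DFL when it fires (SA_ONESHOT) and the signal is not
	 * blocked while the handler runs (SA_NOMASK).
	 */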
	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

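	/*
	 * Sleep with the temporary mask in place.  We always return
	 * -ERESTARTNOHAND; TIF_RESTORE_SIGMASK tells the arch signal
	 * code to restore saved_sigmask once any handler frame has been
	 * set up, so the handler runs with the temporary mask but the
	 * syscall ultimately returns with the original one.
	 */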
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

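/*
 * Weak default for arch_vma_name(); architectures that give special
 * mappings a name (the vDSO, for instance) override this.
 */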
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}