2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/capability.h>
26 #include <linux/freezer.h>
27 #include <linux/pid_namespace.h>
28 #include <linux/nsproxy.h>
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34 #include "audit.h" /* audit_signal_info() */
37 * SLAB caches for signal bits.
40 static struct kmem_cache *sigqueue_cachep;
42 static int __sig_ignored(struct task_struct *t, int sig)
46 /* Is it explicitly or implicitly ignored? */
48 handler = t->sighand->action[sig - 1].sa.sa_handler;
49 return handler == SIG_IGN ||
50 (handler == SIG_DFL && sig_kernel_ignore(sig));
53 static int sig_ignored(struct task_struct *t, int sig)
56 * Tracers always want to know about signals..
58 if (t->ptrace & PT_PTRACED)
62 * Blocked signals are never ignored, since the
63 * signal handler may change by the time it is
66 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
69 return __sig_ignored(t, sig);
73 * Re-calculate pending state from the set of locally pending
74 * signals, globally pending signals, and blocked signals.
76 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
81 switch (_NSIG_WORDS) {
83 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
84 ready |= signal->sig[i] &~ blocked->sig[i];
87 case 4: ready = signal->sig[3] &~ blocked->sig[3];
88 ready |= signal->sig[2] &~ blocked->sig[2];
89 ready |= signal->sig[1] &~ blocked->sig[1];
90 ready |= signal->sig[0] &~ blocked->sig[0];
93 case 2: ready = signal->sig[1] &~ blocked->sig[1];
94 ready |= signal->sig[0] &~ blocked->sig[0];
97 case 1: ready = signal->sig[0] &~ blocked->sig[0];
102 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
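/*
 * Illustrative note (not in the original source): PENDING() merely adapts
 * has_pending_signals() to a struct sigpending, so for example
 *
 *	PENDING(&t->pending, &t->blocked)
 *
 * expands to
 *
 *	has_pending_signals(&t->pending.signal, &t->blocked)
 */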
104 static int recalc_sigpending_tsk(struct task_struct *t)
106 if (t->signal->group_stop_count > 0 ||
107 PENDING(&t->pending, &t->blocked) ||
108 PENDING(&t->signal->shared_pending, &t->blocked)) {
109 set_tsk_thread_flag(t, TIF_SIGPENDING);
113 * We must never clear the flag in another thread, or in current
114 * when it's possible the current syscall is returning -ERESTART*.
115 * So we don't clear it here, and only callers who know they should do so.
121 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
122 * This is superfluous when called on current, the wakeup is a harmless no-op.
124 void recalc_sigpending_and_wake(struct task_struct *t)
126 if (recalc_sigpending_tsk(t))
127 signal_wake_up(t, 0);
130 void recalc_sigpending(void)
132 if (!recalc_sigpending_tsk(current) && !freezing(current))
133 clear_thread_flag(TIF_SIGPENDING);
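/*
 * Illustrative sketch (not part of the original file): recalc_sigpending()
 * is intended to run under current's siglock, right after the blocked or
 * pending sets have been changed, roughly:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */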
137 /* Given the mask, find the first available signal that should be serviced. */
139 int next_signal(struct sigpending *pending, sigset_t *mask)
141 unsigned long i, *s, *m, x;
144 s = pending->signal.sig;
146 switch (_NSIG_WORDS) {
148 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
149 if ((x = *s &~ *m) != 0) {
150 sig = ffz(~x) + i*_NSIG_BPW + 1;
155 case 2: if ((x = s[0] &~ m[0]) != 0)
157 else if ((x = s[1] &~ m[1]) != 0)
164 case 1: if ((x = *s &~ *m) != 0)
172 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
175 struct sigqueue *q = NULL;
176 struct user_struct *user;
179 * In order to avoid problems with "switch_user()", we want to make
180 * sure that the compiler doesn't re-load "t->user"
184 atomic_inc(&user->sigpending);
185 if (override_rlimit ||
186 atomic_read(&user->sigpending) <=
187 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
188 q = kmem_cache_alloc(sigqueue_cachep, flags);
189 if (unlikely(q == NULL)) {
190 atomic_dec(&user->sigpending);
192 INIT_LIST_HEAD(&q->list);
194 q->user = get_uid(user);
199 static void __sigqueue_free(struct sigqueue *q)
201 if (q->flags & SIGQUEUE_PREALLOC)
203 atomic_dec(&q->user->sigpending);
205 kmem_cache_free(sigqueue_cachep, q);
208 void flush_sigqueue(struct sigpending *queue)
212 sigemptyset(&queue->signal);
213 while (!list_empty(&queue->list)) {
214 q = list_entry(queue->list.next, struct sigqueue , list);
215 list_del_init(&q->list);
221 * Flush all pending signals for a task.
223 void flush_signals(struct task_struct *t)
227 spin_lock_irqsave(&t->sighand->siglock, flags);
228 clear_tsk_thread_flag(t, TIF_SIGPENDING);
229 flush_sigqueue(&t->pending);
230 flush_sigqueue(&t->signal->shared_pending);
231 spin_unlock_irqrestore(&t->sighand->siglock, flags);
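/*
 * Illustrative sketch (assumed caller, not from this file): a kernel thread
 * that opted in to a signal with allow_signal() typically acknowledges it
 * and clears the pending state like this:
 *
 *	allow_signal(SIGTERM);
 *	...
 *	if (signal_pending(current))
 *		flush_signals(current);
 */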
234 static void __flush_itimer_signals(struct sigpending *pending)
236 sigset_t signal, retain;
237 struct sigqueue *q, *n;
239 signal = pending->signal;
240 sigemptyset(&retain);
242 list_for_each_entry_safe(q, n, &pending->list, list) {
243 int sig = q->info.si_signo;
245 if (likely(q->info.si_code != SI_TIMER)) {
246 sigaddset(&retain, sig);
248 sigdelset(&signal, sig);
249 list_del_init(&q->list);
254 sigorsets(&pending->signal, &signal, &retain);
257 void flush_itimer_signals(void)
259 struct task_struct *tsk = current;
262 spin_lock_irqsave(&tsk->sighand->siglock, flags);
263 __flush_itimer_signals(&tsk->pending);
264 __flush_itimer_signals(&tsk->signal->shared_pending);
265 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
268 void ignore_signals(struct task_struct *t)
272 for (i = 0; i < _NSIG; ++i)
273 t->sighand->action[i].sa.sa_handler = SIG_IGN;
279 * Flush all handlers for a task.
283 flush_signal_handlers(struct task_struct *t, int force_default)
286 struct k_sigaction *ka = &t->sighand->action[0];
287 for (i = _NSIG ; i != 0 ; i--) {
288 if (force_default || ka->sa.sa_handler != SIG_IGN)
289 ka->sa.sa_handler = SIG_DFL;
291 sigemptyset(&ka->sa.sa_mask);
296 int unhandled_signal(struct task_struct *tsk, int sig)
298 if (is_global_init(tsk))
300 if (tsk->ptrace & PT_PTRACED)
302 return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
303 (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
307 /* Notify the system that a driver wants to block all signals for this
308 * process, and wants to be notified if any signals at all were to be
309 * sent/acted upon. If the notifier routine returns non-zero, then the
310 * signal will be acted upon after all. If the notifier routine returns 0,
311 * then the signal will be blocked. Only one block per process is
312 * allowed. priv is a pointer to private data that the notifier routine
313 * can use to determine if the signal should be blocked or not. */
316 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
320 spin_lock_irqsave(&current->sighand->siglock, flags);
321 current->notifier_mask = mask;
322 current->notifier_data = priv;
323 current->notifier = notifier;
324 spin_unlock_irqrestore(&current->sighand->siglock, flags);
327 /* Notify the system that blocking has ended. */
330 unblock_all_signals(void)
334 spin_lock_irqsave(&current->sighand->siglock, flags);
335 current->notifier = NULL;
336 current->notifier_data = NULL;
338 spin_unlock_irqrestore(&current->sighand->siglock, flags);
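/*
 * Illustrative sketch (hypothetical driver; my_notifier and my_state are
 * made up): the notifier is consulted at dequeue time, returning 0 keeps
 * the signal blocked and non-zero lets it through, and the block is
 * undone with unblock_all_signals():
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_state *s = priv;
 *		return s->signals_allowed;
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, &state, &mask);
 *	...
 *	unblock_all_signals();
 */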
341 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
343 struct sigqueue *q, *first = NULL;
346 * Collect the siginfo appropriate to this signal. Check if
347 * there is another siginfo for the same signal.
349 list_for_each_entry(q, &list->list, list) {
350 if (q->info.si_signo == sig) {
357 sigdelset(&list->signal, sig);
361 list_del_init(&first->list);
362 copy_siginfo(info, &first->info);
363 __sigqueue_free(first);
365 /* Ok, it wasn't in the queue. This must be
366 a fast-pathed signal or we must have been
367 out of queue space. So zero out the info.
369 info->si_signo = sig;
377 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
380 int sig = next_signal(pending, mask);
383 if (current->notifier) {
384 if (sigismember(current->notifier_mask, sig)) {
385 if (!(current->notifier)(current->notifier_data)) {
386 clear_thread_flag(TIF_SIGPENDING);
392 collect_signal(sig, pending, info);
399 * Dequeue a signal and return the element to the caller, which is
400 * expected to free it.
402 * All callers have to hold the siglock.
404 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
408 /* We only dequeue private signals from ourselves, we don't let
409 * signalfd steal them
411 signr = __dequeue_signal(&tsk->pending, mask, info);
413 signr = __dequeue_signal(&tsk->signal->shared_pending,
418 * itimers are process shared and we restart periodic
419 * itimers in the signal delivery path to prevent DoS
420 * attacks in the high resolution timer case. This is
421 * compliant with the old way of self restarting
422 * itimers, as the SIGALRM is a legacy signal and only
423 * queued once. Changing the restart behaviour to
424 * restart the timer in the signal dequeue path is
425 * reducing the timer noise on heavy loaded !highres
428 if (unlikely(signr == SIGALRM)) {
429 struct hrtimer *tmr = &tsk->signal->real_timer;
431 if (!hrtimer_is_queued(tmr) &&
432 tsk->signal->it_real_incr.tv64 != 0) {
433 hrtimer_forward(tmr, tmr->base->get_time(),
434 tsk->signal->it_real_incr);
435 hrtimer_restart(tmr);
444 if (unlikely(sig_kernel_stop(signr))) {
446 * Set a marker that we have dequeued a stop signal. Our
447 * caller might release the siglock and then the pending
448 * stop signal it is about to process is no longer in the
449 * pending bitmasks, but must still be cleared by a SIGCONT
450 * (and overruled by a SIGKILL). So those cases clear this
451 * shared flag after we've set it. Note that this flag may
452 * remain set after the signal we return is ignored or
453 * handled. That doesn't matter because its only purpose
454 * is to alert stop-signal processing code when another
455 * processor has come along and cleared the flag.
457 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
458 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
460 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
462 * Release the siglock to ensure proper locking order
463 * of timer locks outside of siglocks. Note, we leave
464 * irqs disabled here, since the posix-timers code is
465 * about to disable them again anyway.
467 spin_unlock(&tsk->sighand->siglock);
468 do_schedule_next_timer(info);
469 spin_lock(&tsk->sighand->siglock);
475 * Tell a process that it has a new active signal..
477 * NOTE! we rely on the previous spin_lock to
478 * lock interrupts for us! We can only be called with
479 * "siglock" held, and the local interrupt must
480 * have been disabled when that got acquired!
482 * No need to set need_resched since signal event passing
483 * goes through ->blocked
485 void signal_wake_up(struct task_struct *t, int resume)
489 set_tsk_thread_flag(t, TIF_SIGPENDING);
492 * For SIGKILL, we want to wake it up in the stopped/traced/killable
493 * case. We don't check t->state here because there is a race with it
494 * executing on another processor and just now entering stopped state.
495 * By using wake_up_state, we ensure the process will wake up and
496 * handle its death signal.
498 mask = TASK_INTERRUPTIBLE;
500 mask |= TASK_WAKEKILL;
501 if (!wake_up_state(t, mask))
506 * Remove signals in mask from the pending set and queue.
507 * Returns 1 if any signals were found.
509 * All callers must be holding the siglock.
511 * This version takes a sigset mask and looks at all signals,
512 * not just those in the first mask word.
514 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
516 struct sigqueue *q, *n;
519 sigandsets(&m, mask, &s->signal);
520 if (sigisemptyset(&m))
523 signandsets(&s->signal, &s->signal, mask);
524 list_for_each_entry_safe(q, n, &s->list, list) {
525 if (sigismember(mask, q->info.si_signo)) {
526 list_del_init(&q->list);
533 * Remove signals in mask from the pending set and queue.
534 * Returns 1 if any signals were found.
536 * All callers must be holding the siglock.
538 static int rm_from_queue(unsigned long mask, struct sigpending *s)
540 struct sigqueue *q, *n;
542 if (!sigtestsetmask(&s->signal, mask))
545 sigdelsetmask(&s->signal, mask);
546 list_for_each_entry_safe(q, n, &s->list, list) {
547 if (q->info.si_signo < SIGRTMIN &&
548 (mask & sigmask(q->info.si_signo))) {
549 list_del_init(&q->list);
557 * Bad permissions for sending the signal
559 static int check_kill_permission(int sig, struct siginfo *info,
560 struct task_struct *t)
565 if (!valid_signal(sig))
568 if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
571 error = audit_signal_info(sig, t); /* Let audit system see the signal */
575 if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
576 (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
577 !capable(CAP_KILL)) {
580 sid = task_session(t);
582 * We don't return the error if sid == NULL. The
583 * task was unhashed, the caller must notice this.
585 if (!sid || sid == task_session(current))
592 return security_task_kill(t, info, sig, 0);
596 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
599 * Handle magic process-wide effects of stop/continue signals. Unlike
600 * the signal actions, these happen immediately at signal-generation
601 * time regardless of blocking, ignoring, or handling. This does the
602 * actual continuing for SIGCONT, but not the actual stopping for stop
603 * signals. The process stop is done as a signal action for SIG_DFL.
605 * Returns true if the signal should be actually delivered, otherwise
606 * it should be dropped.
608 static int prepare_signal(int sig, struct task_struct *p)
610 struct signal_struct *signal = p->signal;
611 struct task_struct *t;
613 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
615 * The process is in the middle of dying, nothing to do.
617 } else if (sig_kernel_stop(sig)) {
619 * This is a stop signal. Remove SIGCONT from all queues.
621 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
624 rm_from_queue(sigmask(SIGCONT), &t->pending);
625 } while_each_thread(p, t);
626 } else if (sig == SIGCONT) {
629 * Remove all stop signals from all queues,
630 * and wake all threads.
632 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
636 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
638 * If there is a handler for SIGCONT, we must make
639 * sure that no thread returns to user mode before
640 * we post the signal, in case it was the only
641 * thread eligible to run the signal handler--then
642 * it must not do anything between resuming and
643 * running the handler. With the TIF_SIGPENDING
644 * flag set, the thread will pause and acquire the
645 * siglock that we hold now and until we've queued
646 * the pending signal.
648 * Wake up the stopped thread _after_ setting
651 state = __TASK_STOPPED;
652 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
653 set_tsk_thread_flag(t, TIF_SIGPENDING);
654 state |= TASK_INTERRUPTIBLE;
656 wake_up_state(t, state);
657 } while_each_thread(p, t);
660 * Notify the parent with CLD_CONTINUED if we were stopped.
662 * If we were in the middle of a group stop, we pretend it
663 * was already finished, and then continued. Since SIGCHLD
664 * doesn't queue we report only CLD_STOPPED, as if the next
665 * CLD_CONTINUED was dropped.
668 if (signal->flags & SIGNAL_STOP_STOPPED)
669 why |= SIGNAL_CLD_CONTINUED;
670 else if (signal->group_stop_count)
671 why |= SIGNAL_CLD_STOPPED;
675 * The first thread which returns from finish_stop()
676 * will take ->siglock, notice SIGNAL_CLD_MASK, and
677 * notify its parent. See get_signal_to_deliver().
679 signal->flags = why | SIGNAL_STOP_CONTINUED;
680 signal->group_stop_count = 0;
681 signal->group_exit_code = 0;
684 * We are not stopped, but there could be a stop
685 * signal in the middle of being processed after
686 * being removed from the queue. Clear that too.
688 signal->flags &= ~SIGNAL_STOP_DEQUEUED;
692 return !sig_ignored(p, sig);
696 * Test if P wants to take SIG. After we've checked all threads with this,
697 * it's equivalent to finding no threads not blocking SIG. Any threads not
698 * blocking SIG were ruled out because they are not running and already
699 * have pending signals. Such threads will dequeue from the shared queue
700 * as soon as they're available, so putting the signal on the shared queue
701 * will be equivalent to sending it to one such thread.
703 static inline int wants_signal(int sig, struct task_struct *p)
705 if (sigismember(&p->blocked, sig))
707 if (p->flags & PF_EXITING)
711 if (task_is_stopped_or_traced(p))
713 return task_curr(p) || !signal_pending(p);
716 static void complete_signal(int sig, struct task_struct *p, int group)
718 struct signal_struct *signal = p->signal;
719 struct task_struct *t;
722 * Now find a thread we can wake up to take the signal off the queue.
724 * If the main thread wants the signal, it gets first crack.
725 * Probably the least surprising to the average bear.
727 if (wants_signal(sig, p))
729 else if (!group || thread_group_empty(p))
731 * There is just one thread and it does not need to be woken.
732 * It will dequeue unblocked signals before it runs again.
737 * Otherwise try to find a suitable thread.
739 t = signal->curr_target;
740 while (!wants_signal(sig, t)) {
742 if (t == signal->curr_target)
744 * No thread needs to be woken.
745 * Any eligible threads will see
746 * the signal in the queue soon.
750 signal->curr_target = t;
754 * Found a killable thread. If the signal will be fatal,
755 * then start taking the whole group down immediately.
757 if (sig_fatal(p, sig) &&
758 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
759 !sigismember(&t->real_blocked, sig) &&
760 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
762 * This signal will be fatal to the whole group.
764 if (!sig_kernel_coredump(sig)) {
766 * Start a group exit and wake everybody up.
767 * This way we don't have other threads
768 * running and doing things after a slower
769 * thread has the fatal signal pending.
771 signal->flags = SIGNAL_GROUP_EXIT;
772 signal->group_exit_code = sig;
773 signal->group_stop_count = 0;
776 sigaddset(&t->pending.signal, SIGKILL);
777 signal_wake_up(t, 1);
778 } while_each_thread(p, t);
784 * The signal is already in the shared-pending queue.
785 * Tell the chosen thread to wake up and dequeue it.
787 signal_wake_up(t, sig == SIGKILL);
791 static inline int legacy_queue(struct sigpending *signals, int sig)
793 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
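/*
 * Illustrative note (not in the original source): this check is what makes
 * classic signals coalesce. While one SIGUSR1 is still pending the second
 * send below is dropped, whereas real-time signals (>= SIGRTMIN) would be
 * queued once per send:
 *
 *	send_sig(SIGUSR1, task, 0);
 *	send_sig(SIGUSR1, task, 0);
 */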
796 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
799 struct sigpending *pending;
802 assert_spin_locked(&t->sighand->siglock);
803 if (!prepare_signal(sig, t))
806 pending = group ? &t->signal->shared_pending : &t->pending;
808 * Short-circuit ignored signals and support queuing
809 * exactly one non-rt signal, so that we can get more
810 * detailed information about the cause of the signal.
812 if (legacy_queue(pending, sig))
815 * fast-pathed signals for kernel-internal things like SIGSTOP
818 if (info == SEND_SIG_FORCED)
821 /* Real-time signals must be queued if sent by sigqueue, or
822 some other real-time mechanism. It is implementation
823 defined whether kill() does so. We attempt to do so, on
824 the principle of least surprise, but since kill is not
825 allowed to fail with EAGAIN when low on memory we just
826 make sure at least one signal gets delivered and don't
827 pass on the info struct. */
829 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
830 (is_si_special(info) ||
831 info->si_code >= 0)));
833 list_add_tail(&q->list, &pending->list);
834 switch ((unsigned long) info) {
835 case (unsigned long) SEND_SIG_NOINFO:
836 q->info.si_signo = sig;
837 q->info.si_errno = 0;
838 q->info.si_code = SI_USER;
839 q->info.si_pid = task_pid_vnr(current);
840 q->info.si_uid = current->uid;
842 case (unsigned long) SEND_SIG_PRIV:
843 q->info.si_signo = sig;
844 q->info.si_errno = 0;
845 q->info.si_code = SI_KERNEL;
850 copy_siginfo(&q->info, info);
853 } else if (!is_si_special(info)) {
854 if (sig >= SIGRTMIN && info->si_code != SI_USER)
856 * Queue overflow, abort. We may abort if the signal was rt
857 * and sent by user using something other than kill().
863 signalfd_notify(t, sig);
864 sigaddset(&pending->signal, sig);
865 complete_signal(sig, t, group);
869 int print_fatal_signals;
871 static void print_fatal_signal(struct pt_regs *regs, int signr)
873 printk("%s/%d: potentially unexpected fatal signal %d.\n",
874 current->comm, task_pid_nr(current), signr);
876 #if defined(__i386__) && !defined(__arch_um__)
877 printk("code at %08lx: ", regs->ip);
880 for (i = 0; i < 16; i++) {
883 __get_user(insn, (unsigned char *)(regs->ip + i));
884 printk("%02x ", insn);
892 static int __init setup_print_fatal_signals(char *str)
894 get_option (&str, &print_fatal_signals);
899 __setup("print-fatal-signals=", setup_print_fatal_signals);
902 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
904 return send_signal(sig, info, p, 1);
908 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
910 return send_signal(sig, info, t, 0);
914 * Force a signal that the process can't ignore: if necessary
915 * we unblock the signal and change any SIG_IGN to SIG_DFL.
917 * Note: If we unblock the signal, we always reset it to SIG_DFL,
918 * since we do not want to have a signal handler that was blocked
919 * be invoked when user space had explicitly blocked it.
921 * We don't want to have recursive SIGSEGV's etc, for example,
922 * that is why we also clear SIGNAL_UNKILLABLE.
925 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
927 unsigned long int flags;
928 int ret, blocked, ignored;
929 struct k_sigaction *action;
931 spin_lock_irqsave(&t->sighand->siglock, flags);
932 action = &t->sighand->action[sig-1];
933 ignored = action->sa.sa_handler == SIG_IGN;
934 blocked = sigismember(&t->blocked, sig);
935 if (blocked || ignored) {
936 action->sa.sa_handler = SIG_DFL;
938 sigdelset(&t->blocked, sig);
939 recalc_sigpending_and_wake(t);
942 if (action->sa.sa_handler == SIG_DFL)
943 t->signal->flags &= ~SIGNAL_UNKILLABLE;
944 ret = specific_send_sig_info(sig, info, t);
945 spin_unlock_irqrestore(&t->sighand->siglock, flags);
951 force_sig_specific(int sig, struct task_struct *t)
953 force_sig_info(sig, SEND_SIG_FORCED, t);
957 * Nuke all other threads in the group.
959 void zap_other_threads(struct task_struct *p)
961 struct task_struct *t;
963 p->signal->group_stop_count = 0;
965 for (t = next_thread(p); t != p; t = next_thread(t)) {
967 * Don't bother with already dead threads
972 /* SIGKILL will be handled before any pending SIGSTOP */
973 sigaddset(&t->pending.signal, SIGKILL);
974 signal_wake_up(t, 1);
978 int __fatal_signal_pending(struct task_struct *tsk)
980 return sigismember(&tsk->pending.signal, SIGKILL);
982 EXPORT_SYMBOL(__fatal_signal_pending);
984 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
986 struct sighand_struct *sighand;
990 sighand = rcu_dereference(tsk->sighand);
991 if (unlikely(sighand == NULL))
994 spin_lock_irqsave(&sighand->siglock, *flags);
995 if (likely(sighand == tsk->sighand))
997 spin_unlock_irqrestore(&sighand->siglock, *flags);
1004 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1006 unsigned long flags;
1009 ret = check_kill_permission(sig, info, p);
1013 if (lock_task_sighand(p, &flags)) {
1014 ret = __group_send_sig_info(sig, info, p);
1015 unlock_task_sighand(p, &flags);
1023 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1024 * control characters do (^C, ^Z etc)
1027 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1029 struct task_struct *p = NULL;
1030 int retval, success;
1034 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1035 int err = group_send_sig_info(sig, info, p);
1038 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1039 return success ? 0 : retval;
1042 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1045 struct task_struct *p;
1049 p = pid_task(pid, PIDTYPE_PID);
1051 error = group_send_sig_info(sig, info, p);
1052 if (unlikely(error == -ESRCH))
1054 * The task was unhashed in between, try again.
1055 * If it is dead, pid_task() will return NULL,
1056 * if we race with de_thread() it will find the
1067 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1071 error = kill_pid_info(sig, info, find_vpid(pid));
1076 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1077 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1078 uid_t uid, uid_t euid, u32 secid)
1081 struct task_struct *p;
1083 if (!valid_signal(sig))
1086 read_lock(&tasklist_lock);
1087 p = pid_task(pid, PIDTYPE_PID);
1092 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1093 && (euid != p->suid) && (euid != p->uid)
1094 && (uid != p->suid) && (uid != p->uid)) {
1098 ret = security_task_kill(p, info, sig, secid);
1101 if (sig && p->sighand) {
1102 unsigned long flags;
1103 spin_lock_irqsave(&p->sighand->siglock, flags);
1104 ret = __group_send_sig_info(sig, info, p);
1105 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1108 read_unlock(&tasklist_lock);
1111 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1114 * kill_something_info() interprets pid in interesting ways just like kill(2).
1116 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1117 * is probably wrong. Should make it like BSD or SYSV.
1120 static int kill_something_info(int sig, struct siginfo *info, int pid)
1126 ret = kill_pid_info(sig, info, find_vpid(pid));
1131 read_lock(&tasklist_lock);
1133 ret = __kill_pgrp_info(sig, info,
1134 pid ? find_vpid(-pid) : task_pgrp(current));
1136 int retval = 0, count = 0;
1137 struct task_struct * p;
1139 for_each_process(p) {
1140 if (p->pid > 1 && !same_thread_group(p, current)) {
1141 int err = group_send_sig_info(sig, info, p);
1147 ret = count ? retval : -ESRCH;
1149 read_unlock(&tasklist_lock);
1155 * These are for backward compatibility with the rest of the kernel source.
1159 * The caller must ensure the task can't exit.
1162 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1165 unsigned long flags;
1168 * Make sure legacy kernel users don't send in bad values
1169 * (normal paths check this in check_kill_permission).
1171 if (!valid_signal(sig))
1174 spin_lock_irqsave(&p->sighand->siglock, flags);
1175 ret = specific_send_sig_info(sig, info, p);
1176 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1180 #define __si_special(priv) \
1181 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1184 send_sig(int sig, struct task_struct *p, int priv)
1186 return send_sig_info(sig, __si_special(priv), p);
1190 force_sig(int sig, struct task_struct *p)
1192 force_sig_info(sig, SEND_SIG_PRIV, p);
1196 * When things go south during signal handling, we
1197 * will force a SIGSEGV. And if the signal that caused
1198 * the problem was already a SIGSEGV, we'll want to
1199 * make sure we don't even try to deliver the signal..
1202 force_sigsegv(int sig, struct task_struct *p)
1204 if (sig == SIGSEGV) {
1205 unsigned long flags;
1206 spin_lock_irqsave(&p->sighand->siglock, flags);
1207 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1208 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1210 force_sig(SIGSEGV, p);
1214 int kill_pgrp(struct pid *pid, int sig, int priv)
1218 read_lock(&tasklist_lock);
1219 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1220 read_unlock(&tasklist_lock);
1224 EXPORT_SYMBOL(kill_pgrp);
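/*
 * Illustrative sketch (assumed caller, not from this file): tty line
 * disciplines use this helper to deliver job-control signals to the
 * foreground process group, roughly:
 *
 *	kill_pgrp(tty->pgrp, SIGINT, 1);
 */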
1226 int kill_pid(struct pid *pid, int sig, int priv)
1228 return kill_pid_info(sig, __si_special(priv), pid);
1230 EXPORT_SYMBOL(kill_pid);
1233 kill_proc(pid_t pid, int sig, int priv)
1238 ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
1244 * These functions support sending signals using preallocated sigqueue
1245 * structures. This is needed "because realtime applications cannot
1246 * afford to lose notifications of asynchronous events, like timer
1247 * expirations or I/O completions". In the case of Posix Timers
1248 * we allocate the sigqueue structure from the timer_create. If this
1249 * allocation fails we are able to report the failure to the application
1250 * with an EAGAIN error.
1253 struct sigqueue *sigqueue_alloc(void)
1257 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1258 q->flags |= SIGQUEUE_PREALLOC;
1262 void sigqueue_free(struct sigqueue *q)
1264 unsigned long flags;
1265 spinlock_t *lock = &current->sighand->siglock;
1267 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1269 * We must hold ->siglock while testing q->list
1270 * to serialize with collect_signal() or with
1271 * __exit_signal()->flush_sigqueue().
1273 spin_lock_irqsave(lock, flags);
1274 q->flags &= ~SIGQUEUE_PREALLOC;
1276 * If it is queued it will be freed when dequeued,
1277 * like the "regular" sigqueue.
1279 if (!list_empty(&q->list))
1281 spin_unlock_irqrestore(lock, flags);
1287 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1289 int sig = q->info.si_signo;
1290 struct sigpending *pending;
1291 unsigned long flags;
1294 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1297 if (!likely(lock_task_sighand(t, &flags)))
1300 ret = 1; /* the signal is ignored */
1301 if (!prepare_signal(sig, t))
1305 if (unlikely(!list_empty(&q->list))) {
1307 * If an SI_TIMER entry is already queued, just increment
1308 * the overrun count.
1310 BUG_ON(q->info.si_code != SI_TIMER);
1311 q->info.si_overrun++;
1315 signalfd_notify(t, sig);
1316 pending = group ? &t->signal->shared_pending : &t->pending;
1317 list_add_tail(&q->list, &pending->list);
1318 sigaddset(&pending->signal, sig);
1319 complete_signal(sig, t, group);
1321 unlock_task_sighand(t, &flags);
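/*
 * Illustrative sketch (assumed caller, simplified from the POSIX timer
 * code): the preallocated sigqueue lives for the lifetime of the timer,
 * so expiry can never fail for lack of memory:
 *
 *	q = sigqueue_alloc();		(at timer_create time)
 *	...
 *	send_sigqueue(q, task, group);	(at each expiry)
 *	...
 *	sigqueue_free(q);		(at timer_delete time)
 */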
1327 * Wake up any threads in the parent blocked in wait* syscalls.
1329 static inline void __wake_up_parent(struct task_struct *p,
1330 struct task_struct *parent)
1332 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1336 * Let a parent know about the death of a child.
1337 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1340 void do_notify_parent(struct task_struct *tsk, int sig)
1342 struct siginfo info;
1343 unsigned long flags;
1344 struct sighand_struct *psig;
1348 /* do_notify_parent_cldstop should have been called instead. */
1349 BUG_ON(task_is_stopped_or_traced(tsk));
1351 BUG_ON(!tsk->ptrace &&
1352 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1354 info.si_signo = sig;
1357 * we are under tasklist_lock here so our parent is tied to
1358 * us and cannot exit and release its namespace.
1360 * the only thing it can do is to switch its nsproxy with sys_unshare,
1361 * but unsharing pid namespaces is not allowed, so we'll always
1362 * see the relevant namespace
1364 * write_lock() currently calls preempt_disable() which is the
1365 * same as rcu_read_lock(), but according to Oleg, it is not
1366 * correct to rely on this
1369 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1372 info.si_uid = tsk->uid;
1374 /* FIXME: find out whether or not this is supposed to be c*time. */
1375 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1376 tsk->signal->utime));
1377 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1378 tsk->signal->stime));
1380 info.si_status = tsk->exit_code & 0x7f;
1381 if (tsk->exit_code & 0x80)
1382 info.si_code = CLD_DUMPED;
1383 else if (tsk->exit_code & 0x7f)
1384 info.si_code = CLD_KILLED;
1386 info.si_code = CLD_EXITED;
1387 info.si_status = tsk->exit_code >> 8;
1390 psig = tsk->parent->sighand;
1391 spin_lock_irqsave(&psig->siglock, flags);
1392 if (!tsk->ptrace && sig == SIGCHLD &&
1393 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1394 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1396 * We are exiting and our parent doesn't care. POSIX.1
1397 * defines special semantics for setting SIGCHLD to SIG_IGN
1398 * or setting the SA_NOCLDWAIT flag: we should be reaped
1399 * automatically and not left for our parent's wait4 call.
1400 * Rather than having the parent do it as a magic kind of
1401 * signal handler, we just set this to tell do_exit that we
1402 * can be cleaned up without becoming a zombie. Note that
1403 * we still call __wake_up_parent in this case, because a
1404 * blocked sys_wait4 might now return -ECHILD.
1406 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1407 * is implementation-defined: we do (if you don't want
1408 * it, just use SIG_IGN instead).
1410 tsk->exit_signal = -1;
1411 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1414 if (valid_signal(sig) && sig > 0)
1415 __group_send_sig_info(sig, &info, tsk->parent);
1416 __wake_up_parent(tsk, tsk->parent);
1417 spin_unlock_irqrestore(&psig->siglock, flags);
1420 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1422 struct siginfo info;
1423 unsigned long flags;
1424 struct task_struct *parent;
1425 struct sighand_struct *sighand;
1427 if (tsk->ptrace & PT_PTRACED)
1428 parent = tsk->parent;
1430 tsk = tsk->group_leader;
1431 parent = tsk->real_parent;
1434 info.si_signo = SIGCHLD;
1437 * see comment in do_notify_parent() about the following 3 lines
1440 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1443 info.si_uid = tsk->uid;
1445 /* FIXME: find out whether or not this is supposed to be c*time. */
1446 info.si_utime = cputime_to_jiffies(tsk->utime);
1447 info.si_stime = cputime_to_jiffies(tsk->stime);
1452 info.si_status = SIGCONT;
1455 info.si_status = tsk->signal->group_exit_code & 0x7f;
1458 info.si_status = tsk->exit_code & 0x7f;
1464 sighand = parent->sighand;
1465 spin_lock_irqsave(&sighand->siglock, flags);
1466 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1467 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1468 __group_send_sig_info(SIGCHLD, &info, parent);
1470 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1472 __wake_up_parent(tsk, parent);
1473 spin_unlock_irqrestore(&sighand->siglock, flags);
1476 static inline int may_ptrace_stop(void)
1478 if (!likely(current->ptrace & PT_PTRACED))
1481 * Are we in the middle of do_coredump?
1482 * If so, and our tracer is also part of the coredump, stopping
1483 * is a deadlock situation, and pointless because our tracer
1484 * is dead, so don't allow us to stop.
1485 * If SIGKILL was already sent before the caller unlocked
1486 * ->siglock we must see ->core_waiters != 0. Otherwise it
1487 * is safe to enter schedule().
1489 if (unlikely(current->mm->core_waiters) &&
1490 unlikely(current->mm == current->parent->mm))
1497 * Return nonzero if there is a SIGKILL that should be waking us up.
1498 * Called with the siglock held.
1500 static int sigkill_pending(struct task_struct *tsk)
1502 return ((sigismember(&tsk->pending.signal, SIGKILL) ||
1503 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
1504 !unlikely(sigismember(&tsk->blocked, SIGKILL)));
1508 * This must be called with current->sighand->siglock held.
1510 * This should be the path for all ptrace stops.
1511 * We always set current->last_siginfo while stopped here.
1512 * That makes it a way to test a stopped process for
1513 * being ptrace-stopped vs being job-control-stopped.
1515 * If we actually decide not to stop at all because the tracer
1516 * is gone, we keep current->exit_code unless clear_code.
1518 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1522 if (arch_ptrace_stop_needed(exit_code, info)) {
1524 * The arch code has something special to do before a
1525 * ptrace stop. This is allowed to block, e.g. for faults
1526 * on user stack pages. We can't keep the siglock while
1527 * calling arch_ptrace_stop, so we must release it now.
1528 * To preserve proper semantics, we must do this before
1529 * any signal bookkeeping like checking group_stop_count.
1530 * Meanwhile, a SIGKILL could come in before we retake the
1531 * siglock. That must prevent us from sleeping in TASK_TRACED.
1532 * So after regaining the lock, we must check for SIGKILL.
1534 spin_unlock_irq(&current->sighand->siglock);
1535 arch_ptrace_stop(exit_code, info);
1536 spin_lock_irq(&current->sighand->siglock);
1537 killed = sigkill_pending(current);
1541 * If there is a group stop in progress,
1542 * we must participate in the bookkeeping.
1544 if (current->signal->group_stop_count > 0)
1545 --current->signal->group_stop_count;
1547 current->last_siginfo = info;
1548 current->exit_code = exit_code;
1550 /* Let the debugger run. */
1551 __set_current_state(TASK_TRACED);
1552 spin_unlock_irq(&current->sighand->siglock);
1553 read_lock(&tasklist_lock);
1554 if (!unlikely(killed) && may_ptrace_stop()) {
1555 do_notify_parent_cldstop(current, CLD_TRAPPED);
1556 read_unlock(&tasklist_lock);
1560 * By the time we got the lock, our tracer went away.
1561 * Don't drop the lock yet, another tracer may come.
1563 __set_current_state(TASK_RUNNING);
1565 current->exit_code = 0;
1566 read_unlock(&tasklist_lock);
1570 * While in TASK_TRACED, we were considered "frozen enough".
1571 * Now that we woke up, it's crucial if we're supposed to be
1572 * frozen that we freeze now before running anything substantial.
1577 * We are back. Now reacquire the siglock before touching
1578 * last_siginfo, so that we are sure to have synchronized with
1579 * any signal-sending on another CPU that wants to examine it.
1581 spin_lock_irq(&current->sighand->siglock);
1582 current->last_siginfo = NULL;
1585 * Queued signals ignored us while we were stopped for tracing.
1586 * So check for any that we should take before resuming user mode.
1587 * This sets TIF_SIGPENDING, but never clears it.
1589 recalc_sigpending_tsk(current);
1592 void ptrace_notify(int exit_code)
1596 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1598 memset(&info, 0, sizeof info);
1599 info.si_signo = SIGTRAP;
1600 info.si_code = exit_code;
1601 info.si_pid = task_pid_vnr(current);
1602 info.si_uid = current->uid;
1604 /* Let the debugger run. */
1605 spin_lock_irq(&current->sighand->siglock);
1606 ptrace_stop(exit_code, 1, &info);
1607 spin_unlock_irq(&current->sighand->siglock);
1611 finish_stop(int stop_count)
1614 * If there are no other threads in the group, or if there is
1615 * a group stop in progress and we are the last to stop,
1616 * report to the parent. When ptraced, every thread reports itself.
1618 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1619 read_lock(&tasklist_lock);
1620 do_notify_parent_cldstop(current, CLD_STOPPED);
1621 read_unlock(&tasklist_lock);
1626 } while (try_to_freeze());
1628 * Now we don't run again until continued.
1630 current->exit_code = 0;
1634 * This performs the stopping for SIGSTOP and other stop signals.
1635 * We have to stop all threads in the thread group.
1636 * Returns nonzero if we've actually stopped and released the siglock.
1637 * Returns zero if we didn't stop and still hold the siglock.
1639 static int do_signal_stop(int signr)
1641 struct signal_struct *sig = current->signal;
1644 if (sig->group_stop_count > 0) {
1646 * There is a group stop in progress. We don't need to
1647 * start another one.
1649 stop_count = --sig->group_stop_count;
1651 struct task_struct *t;
1653 if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
1654 != SIGNAL_STOP_DEQUEUED) ||
1655 unlikely(signal_group_exit(sig)))
1658 * There is no group stop already in progress.
1659 * We must initiate one now.
1661 sig->group_exit_code = signr;
1664 for (t = next_thread(current); t != current; t = next_thread(t))
1666 * Setting state to TASK_STOPPED for a group
1667 * stop is always done with the siglock held,
1668 * so this check has no races.
1670 if (!(t->flags & PF_EXITING) &&
1671 !task_is_stopped_or_traced(t)) {
1673 signal_wake_up(t, 0);
1675 sig->group_stop_count = stop_count;
1678 if (stop_count == 0)
1679 sig->flags = SIGNAL_STOP_STOPPED;
1680 current->exit_code = sig->group_exit_code;
1681 __set_current_state(TASK_STOPPED);
1683 spin_unlock_irq(&current->sighand->siglock);
1684 finish_stop(stop_count);
1688 static int ptrace_signal(int signr, siginfo_t *info,
1689 struct pt_regs *regs, void *cookie)
1691 if (!(current->ptrace & PT_PTRACED))
1694 ptrace_signal_deliver(regs, cookie);
1696 /* Let the debugger run. */
1697 ptrace_stop(signr, 0, info);
1699 /* We're back. Did the debugger cancel the sig? */
1700 signr = current->exit_code;
1704 current->exit_code = 0;
1706 /* Update the siginfo structure if the signal has
1707 changed. If the debugger wanted something
1708 specific in the siginfo structure then it should
1709 have updated *info via PTRACE_SETSIGINFO. */
1710 if (signr != info->si_signo) {
1711 info->si_signo = signr;
1713 info->si_code = SI_USER;
1714 info->si_pid = task_pid_vnr(current->parent);
1715 info->si_uid = current->parent->uid;
1718 /* If the (new) signal is now blocked, requeue it. */
1719 if (sigismember(&current->blocked, signr)) {
1720 specific_send_sig_info(signr, info, current);
1727 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1728 struct pt_regs *regs, void *cookie)
1730 struct sighand_struct *sighand = current->sighand;
1731 struct signal_struct *signal = current->signal;
1736 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1737 * While in TASK_STOPPED, we were considered "frozen enough".
1738 * Now that we woke up, it's crucial if we're supposed to be
1739 * frozen that we freeze now before running anything substantial.
1743 spin_lock_irq(&sighand->siglock);
1745 * Every stopped thread goes here after wakeup. Check to see if
1746 * we should notify the parent, prepare_signal(SIGCONT) encodes
1747 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1749 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1750 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1751 ? CLD_CONTINUED : CLD_STOPPED;
1752 signal->flags &= ~SIGNAL_CLD_MASK;
1753 spin_unlock_irq(&sighand->siglock);
1755 read_lock(&tasklist_lock);
1756 do_notify_parent_cldstop(current->group_leader, why);
1757 read_unlock(&tasklist_lock);
1762 struct k_sigaction *ka;
1764 if (unlikely(signal->group_stop_count > 0) &&
1768 signr = dequeue_signal(current, &current->blocked, info);
1770 break; /* will return 0 */
1772 if (signr != SIGKILL) {
1773 signr = ptrace_signal(signr, info, regs, cookie);
1778 ka = &sighand->action[signr-1];
1779 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1781 if (ka->sa.sa_handler != SIG_DFL) {
1782 /* Run the handler. */
1785 if (ka->sa.sa_flags & SA_ONESHOT)
1786 ka->sa.sa_handler = SIG_DFL;
1788 break; /* will return non-zero "signr" value */
1792 * Now we are doing the default action for this signal.
1794 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1798 * Global init gets no signals it doesn't want.
1800 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1801 !signal_group_exit(signal))
1804 if (sig_kernel_stop(signr)) {
1806 * The default action is to stop all threads in
1807 * the thread group. The job control signals
1808 * do nothing in an orphaned pgrp, but SIGSTOP
1809 * always works. Note that siglock needs to be
1810 * dropped during the call to is_orphaned_pgrp()
1811 * because of lock ordering with tasklist_lock.
1812 * This allows an intervening SIGCONT to be posted.
1813 * We need to check for that and bail out if necessary.
1815 if (signr != SIGSTOP) {
1816 spin_unlock_irq(&sighand->siglock);
1818 /* signals can be posted during this window */
1820 if (is_current_pgrp_orphaned())
1823 spin_lock_irq(&sighand->siglock);
1826 if (likely(do_signal_stop(signr))) {
1827 /* It released the siglock. */
1832 * We didn't actually stop, due to a race
1833 * with SIGCONT or something like that.
1838 spin_unlock_irq(&sighand->siglock);
1841 * Anything else is fatal, maybe with a core dump.
1843 current->flags |= PF_SIGNALED;
1845 if (sig_kernel_coredump(signr)) {
1846 if (print_fatal_signals)
1847 print_fatal_signal(regs, signr);
1849 * If it was able to dump core, this kills all
1850 * other threads in the group and synchronizes with
1851 * their demise. If we lost the race with another
1852 * thread getting here, it set group_exit_code
1853 * first and our do_group_exit call below will use
1854 * that value and ignore the one we pass it.
1856 do_coredump((long)signr, signr, regs);
1860 * Death signals, no core dump.
1862 do_group_exit(signr);
1865 spin_unlock_irq(&sighand->siglock);
1869 void exit_signals(struct task_struct *tsk)
1872 struct task_struct *t;
1874 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1875 tsk->flags |= PF_EXITING;
1879 spin_lock_irq(&tsk->sighand->siglock);
1881 * From now this task is not visible for group-wide signals,
1882 * see wants_signal(), do_signal_stop().
1884 tsk->flags |= PF_EXITING;
1885 if (!signal_pending(tsk))
1888 /* It could be that __group_complete_signal() chose us to
1889 * notify about a group-wide signal. Another thread should be
1890 * woken now to take the signal since we will not.
1892 for (t = tsk; (t = next_thread(t)) != tsk; )
1893 if (!signal_pending(t) && !(t->flags & PF_EXITING))
1894 recalc_sigpending_and_wake(t);
1896 if (unlikely(tsk->signal->group_stop_count) &&
1897 !--tsk->signal->group_stop_count) {
1898 tsk->signal->flags = SIGNAL_STOP_STOPPED;
1902 spin_unlock_irq(&tsk->sighand->siglock);
1904 if (unlikely(group_stop)) {
1905 read_lock(&tasklist_lock);
1906 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1907 read_unlock(&tasklist_lock);
1911 EXPORT_SYMBOL(recalc_sigpending);
1912 EXPORT_SYMBOL_GPL(dequeue_signal);
1913 EXPORT_SYMBOL(flush_signals);
1914 EXPORT_SYMBOL(force_sig);
1915 EXPORT_SYMBOL(kill_proc);
1916 EXPORT_SYMBOL(ptrace_notify);
1917 EXPORT_SYMBOL(send_sig);
1918 EXPORT_SYMBOL(send_sig_info);
1919 EXPORT_SYMBOL(sigprocmask);
1920 EXPORT_SYMBOL(block_all_signals);
1921 EXPORT_SYMBOL(unblock_all_signals);
1925 * System call entry points.
1928 asmlinkage long sys_restart_syscall(void)
1930 struct restart_block *restart = &current_thread_info()->restart_block;
1931 return restart->fn(restart);
1934 long do_no_restart_syscall(struct restart_block *param)
1940 * We don't need to get the kernel lock - this is all local to this
1941 * particular thread.. (and that's good, because this is _heavily_
1942 * used by various programs)
1946 * This is also useful for kernel threads that want to temporarily
1947 * (or permanently) block certain signals.
1949 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1950 * interface happily blocks "unblockable" signals like SIGKILL
1953 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1957 spin_lock_irq(&current->sighand->siglock);
1959 *oldset = current->blocked;
1964 sigorsets(&current->blocked, &current->blocked, set);
1967 signandsets(&current->blocked, &current->blocked, set);
1970 current->blocked = *set;
1975 recalc_sigpending();
1976 spin_unlock_irq(&current->sighand->siglock);
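/*
 * Illustrative sketch (hypothetical in-kernel caller): temporarily block
 * everything except SIGKILL around a critical region:
 *
 *	sigset_t blockall, oldset;
 *
 *	siginitsetinv(&blockall, sigmask(SIGKILL));
 *	sigprocmask(SIG_BLOCK, &blockall, &oldset);
 *	...
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);
 */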
1982 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
1984 int error = -EINVAL;
1985 sigset_t old_set, new_set;
1987 /* XXX: Don't preclude handling different sized sigset_t's. */
1988 if (sigsetsize != sizeof(sigset_t))
1993 if (copy_from_user(&new_set, set, sizeof(*set)))
1995 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
1997 error = sigprocmask(how, &new_set, &old_set);
2003 spin_lock_irq(&current->sighand->siglock);
2004 old_set = current->blocked;
2005 spin_unlock_irq(&current->sighand->siglock);
2009 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2017 long do_sigpending(void __user *set, unsigned long sigsetsize)
2019 long error = -EINVAL;
2022 if (sigsetsize > sizeof(sigset_t))
2025 spin_lock_irq(&current->sighand->siglock);
2026 sigorsets(&pending, &current->pending.signal,
2027 &current->signal->shared_pending.signal);
2028 spin_unlock_irq(&current->sighand->siglock);
2030 /* Outside the lock because only this thread touches it. */
2031 sigandsets(&pending, &current->blocked, &pending);
2034 if (!copy_to_user(set, &pending, sigsetsize))
2042 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2044 return do_sigpending(set, sigsetsize);
2047 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2049 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2053 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2055 if (from->si_code < 0)
2056 return __copy_to_user(to, from, sizeof(siginfo_t))
2059 * If you change siginfo_t structure, please be sure
2060 * this code is fixed accordingly.
2061 * Please remember to update the signalfd_copyinfo() function
2062 * inside fs/signalfd.c too, in case siginfo_t changes.
2063 * It should never copy any pad contained in the structure
2064 * to avoid security leaks, but must copy the generic
2065 * 3 ints plus the relevant union member.
2067 err = __put_user(from->si_signo, &to->si_signo);
2068 err |= __put_user(from->si_errno, &to->si_errno);
2069 err |= __put_user((short)from->si_code, &to->si_code);
2070 switch (from->si_code & __SI_MASK) {
2072 err |= __put_user(from->si_pid, &to->si_pid);
2073 err |= __put_user(from->si_uid, &to->si_uid);
2076 err |= __put_user(from->si_tid, &to->si_tid);
2077 err |= __put_user(from->si_overrun, &to->si_overrun);
2078 err |= __put_user(from->si_ptr, &to->si_ptr);
2081 err |= __put_user(from->si_band, &to->si_band);
2082 err |= __put_user(from->si_fd, &to->si_fd);
2085 err |= __put_user(from->si_addr, &to->si_addr);
2086 #ifdef __ARCH_SI_TRAPNO
2087 err |= __put_user(from->si_trapno, &to->si_trapno);
2091 err |= __put_user(from->si_pid, &to->si_pid);
2092 err |= __put_user(from->si_uid, &to->si_uid);
2093 err |= __put_user(from->si_status, &to->si_status);
2094 err |= __put_user(from->si_utime, &to->si_utime);
2095 err |= __put_user(from->si_stime, &to->si_stime);
2097 case __SI_RT: /* This is not generated by the kernel as of now. */
2098 case __SI_MESGQ: /* But this is */
2099 err |= __put_user(from->si_pid, &to->si_pid);
2100 err |= __put_user(from->si_uid, &to->si_uid);
2101 err |= __put_user(from->si_ptr, &to->si_ptr);
2103 default: /* this is just in case for now ... */
2104 err |= __put_user(from->si_pid, &to->si_pid);
2105 err |= __put_user(from->si_uid, &to->si_uid);
2114 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2115 siginfo_t __user *uinfo,
2116 const struct timespec __user *uts,
2125 /* XXX: Don't preclude handling different sized sigset_t's. */
2126 if (sigsetsize != sizeof(sigset_t))
2129 if (copy_from_user(&these, uthese, sizeof(these)))
2133 * Invert the set of allowed signals to get those we
2136 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2140 if (copy_from_user(&ts, uts, sizeof(ts)))
2142 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2147 spin_lock_irq(&current->sighand->siglock);
2148 sig = dequeue_signal(current, &these, &info);
2150 timeout = MAX_SCHEDULE_TIMEOUT;
2152 timeout = (timespec_to_jiffies(&ts)
2153 + (ts.tv_sec || ts.tv_nsec));
2156 /* None ready -- temporarily unblock those we're
2157 * interested in while we are sleeping, so that we'll
2158 * be awakened when they arrive. */
2159 current->real_blocked = current->blocked;
2160 sigandsets(&current->blocked, &current->blocked, &these);
2161 recalc_sigpending();
2162 spin_unlock_irq(&current->sighand->siglock);
2164 timeout = schedule_timeout_interruptible(timeout);
2166 spin_lock_irq(&current->sighand->siglock);
2167 sig = dequeue_signal(current, &these, &info);
2168 current->blocked = current->real_blocked;
2169 siginitset(&current->real_blocked, 0);
2170 recalc_sigpending();
2173 spin_unlock_irq(&current->sighand->siglock);
2178 if (copy_siginfo_to_user(uinfo, &info))
2191 sys_kill(int pid, int sig)
2193 struct siginfo info;
2195 info.si_signo = sig;
2197 info.si_code = SI_USER;
2198 info.si_pid = task_tgid_vnr(current);
2199 info.si_uid = current->uid;
2201 return kill_something_info(sig, &info, pid);
2204 static int do_tkill(int tgid, int pid, int sig)
2207 struct siginfo info;
2208 struct task_struct *p;
2209 unsigned long flags;
2212 info.si_signo = sig;
2214 info.si_code = SI_TKILL;
2215 info.si_pid = task_tgid_vnr(current);
2216 info.si_uid = current->uid;
2219 p = find_task_by_vpid(pid);
2220 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2221 error = check_kill_permission(sig, &info, p);
2223 * The null signal is a permissions and process existence
2224 * probe. No signal is actually delivered.
2226 * If lock_task_sighand() fails we pretend the task dies
2227 * after receiving the signal. The window is tiny, and the
2228 * signal is private anyway.
2230 if (!error && sig && lock_task_sighand(p, &flags)) {
2231 error = specific_send_sig_info(sig, &info, p);
2232 unlock_task_sighand(p, &flags);
2241 * sys_tgkill - send signal to one specific thread
2242 * @tgid: the thread group ID of the thread
2243 * @pid: the PID of the thread
2244 * @sig: signal to be sent
2246 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2247 * exists but no longer belongs to the target process. This
2248 * method solves the problem of threads exiting and PIDs getting reused.
2250 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2252 /* This is only valid for single tasks */
2253 if (pid <= 0 || tgid <= 0)
2256 return do_tkill(tgid, pid, sig);
2260 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2263 sys_tkill(int pid, int sig)
2265 /* This is only valid for single tasks */
2269 return do_tkill(0, pid, sig);
2273 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2277 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2280 /* Not even root can pretend to send signals from the kernel.
2281 Nor can they impersonate a kill(), which adds source info. */
2282 if (info.si_code >= 0)
2284 info.si_signo = sig;
2286 /* POSIX.1b doesn't mention process groups. */
2287 return kill_proc_info(sig, &info, pid);
2290 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2292 struct task_struct *t = current;
2293 struct k_sigaction *k;
2296 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2299 k = &t->sighand->action[sig-1];
2301 spin_lock_irq(&current->sighand->siglock);
2306 sigdelsetmask(&act->sa.sa_mask,
2307 sigmask(SIGKILL) | sigmask(SIGSTOP));
2311 * "Setting a signal action to SIG_IGN for a signal that is
2312 * pending shall cause the pending signal to be discarded,
2313 * whether or not it is blocked."
2315 * "Setting a signal action to SIG_DFL for a signal that is
2316 * pending and whose default action is to ignore the signal
2317 * (for example, SIGCHLD), shall cause the pending signal to
2318 * be discarded, whether or not it is blocked"
2320 if (__sig_ignored(t, sig)) {
2322 sigaddset(&mask, sig);
2323 rm_from_queue_full(&mask, &t->signal->shared_pending);
2325 rm_from_queue_full(&mask, &t->pending);
2327 } while (t != current);
2331 spin_unlock_irq(&current->sighand->siglock);
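/*
 * Illustrative sketch (hypothetical in-kernel caller): per the POSIX text
 * above, installing SIG_IGN also discards a SIGCHLD that is already
 * pending:
 *
 *	struct k_sigaction ka = { .sa.sa_handler = SIG_IGN };
 *
 *	sigemptyset(&ka.sa.sa_mask);
 *	do_sigaction(SIGCHLD, &ka, NULL);
 */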
2336 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2342 oss.ss_sp = (void __user *) current->sas_ss_sp;
2343 oss.ss_size = current->sas_ss_size;
2344 oss.ss_flags = sas_ss_flags(sp);
2353 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2354 || __get_user(ss_sp, &uss->ss_sp)
2355 || __get_user(ss_flags, &uss->ss_flags)
2356 || __get_user(ss_size, &uss->ss_size))
2360 if (on_sig_stack(sp))
2366 * Note - this code used to test ss_flags incorrectly
2367 * old code may have been written using ss_flags==0
2368 * to mean ss_flags==SS_ONSTACK (as this was the only
2369 * way that worked) - this fix preserves that older
2372 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2375 if (ss_flags == SS_DISABLE) {
2380 if (ss_size < MINSIGSTKSZ)
2384 current->sas_ss_sp = (unsigned long) ss_sp;
2385 current->sas_ss_size = ss_size;
2390 if (copy_to_user(uoss, &oss, sizeof(oss)))
2399 #ifdef __ARCH_WANT_SYS_SIGPENDING
2402 sys_sigpending(old_sigset_t __user *set)
2404 return do_sigpending(set, sizeof(*set));
2409 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2410 /* Some platforms have their own version with special arguments;
2411 others support only sys_rt_sigprocmask. */
2414 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2417 old_sigset_t old_set, new_set;
2421 if (copy_from_user(&new_set, set, sizeof(*set)))
2423 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2425 spin_lock_irq(&current->sighand->siglock);
2426 old_set = current->blocked.sig[0];
2434 sigaddsetmask(&current->blocked, new_set);
2437 sigdelsetmask(&current->blocked, new_set);
2440 current->blocked.sig[0] = new_set;
2444 recalc_sigpending();
2445 spin_unlock_irq(&current->sighand->siglock);
2451 old_set = current->blocked.sig[0];
2454 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2461 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2463 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2465 sys_rt_sigaction(int sig,
2466 const struct sigaction __user *act,
2467 struct sigaction __user *oact,
2470 struct k_sigaction new_sa, old_sa;
2473 /* XXX: Don't preclude handling different sized sigset_t's. */
2474 if (sigsetsize != sizeof(sigset_t))
2478 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2482 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2485 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2491 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2493 #ifdef __ARCH_WANT_SYS_SGETMASK
2496 * For backwards compatibility. Functionality superseded by sigprocmask.
2502 return current->blocked.sig[0];
2506 sys_ssetmask(int newmask)
2510 spin_lock_irq(&current->sighand->siglock);
2511 old = current->blocked.sig[0];
2513 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2515 recalc_sigpending();
2516 spin_unlock_irq(&current->sighand->siglock);
2520 #endif /* __ARCH_WANT_SGETMASK */
2522 #ifdef __ARCH_WANT_SYS_SIGNAL
2524 * For backwards compatibility. Functionality superseded by sigaction.
2526 asmlinkage unsigned long
2527 sys_signal(int sig, __sighandler_t handler)
2529 struct k_sigaction new_sa, old_sa;
2532 new_sa.sa.sa_handler = handler;
2533 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2534 sigemptyset(&new_sa.sa.sa_mask);
2536 ret = do_sigaction(sig, &new_sa, &old_sa);
2538 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2540 #endif /* __ARCH_WANT_SYS_SIGNAL */
2542 #ifdef __ARCH_WANT_SYS_PAUSE
2547 current->state = TASK_INTERRUPTIBLE;
2549 return -ERESTARTNOHAND;
2554 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2555 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2559 /* XXX: Don't preclude handling different sized sigset_t's. */
2560 if (sigsetsize != sizeof(sigset_t))
2563 if (copy_from_user(&newset, unewset, sizeof(newset)))
2565 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2567 spin_lock_irq(&current->sighand->siglock);
2568 current->saved_sigmask = current->blocked;
2569 current->blocked = newset;
2570 recalc_sigpending();
2571 spin_unlock_irq(&current->sighand->siglock);
2573 current->state = TASK_INTERRUPTIBLE;
2575 set_restore_sigmask();
2576 return -ERESTARTNOHAND;
2578 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2580 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2585 void __init signals_init(void)
2587 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);