2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/tracehook.h>
26 #include <linux/capability.h>
27 #include <linux/freezer.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/nsproxy.h>
30 #include <trace/sched.h>
32 #include <asm/param.h>
33 #include <asm/uaccess.h>
34 #include <asm/unistd.h>
35 #include <asm/siginfo.h>
36 #include "audit.h" /* audit_signal_info() */
39 * SLAB caches for signal bits.
42 static struct kmem_cache *sigqueue_cachep;
44 DEFINE_TRACE(sched_signal_send);
46 static void __user *sig_handler(struct task_struct *t, int sig)
48 return t->sighand->action[sig - 1].sa.sa_handler;
51 static int sig_handler_ignored(void __user *handler, int sig)
53 /* Is it explicitly or implicitly ignored? */
54 return handler == SIG_IGN ||
55 (handler == SIG_DFL && sig_kernel_ignore(sig));
58 static int sig_ignored(struct task_struct *t, int sig)
63 * Blocked signals are never ignored, since the
64 * signal handler may change by the time it is
67 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
70 handler = sig_handler(t, sig);
71 if (!sig_handler_ignored(handler, sig))
75 * Tracers may want to know about even ignored signals.
77 return !tracehook_consider_ignored_signal(t, sig, handler);
81 * Re-calculate pending state from the set of locally pending
82 * signals, globally pending signals, and blocked signals.
84 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
89 switch (_NSIG_WORDS) {
91 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
92 ready |= signal->sig[i] &~ blocked->sig[i];
95 case 4: ready = signal->sig[3] &~ blocked->sig[3];
96 ready |= signal->sig[2] &~ blocked->sig[2];
97 ready |= signal->sig[1] &~ blocked->sig[1];
98 ready |= signal->sig[0] &~ blocked->sig[0];
101 case 2: ready = signal->sig[1] &~ blocked->sig[1];
102 ready |= signal->sig[0] &~ blocked->sig[0];
105 case 1: ready = signal->sig[0] &~ blocked->sig[0];
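/*
 * Example: a signal is "ready" here exactly when its bit is set in *signal
 * and clear in *blocked; the switch simply unrolls that word-by-word
 * AND-NOT for the common _NSIG_WORDS values.
 */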
110 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
112 static int recalc_sigpending_tsk(struct task_struct *t)
114 if (t->signal->group_stop_count > 0 ||
115 PENDING(&t->pending, &t->blocked) ||
116 PENDING(&t->signal->shared_pending, &t->blocked)) {
117 set_tsk_thread_flag(t, TIF_SIGPENDING);
121 * We must never clear the flag in another thread, or in current
122 * when it's possible the current syscall is returning -ERESTART*.
123 * So we don't clear it here; only callers who know they should clear it do so.
129 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
130 * This is superfluous when called on current, the wakeup is a harmless no-op.
132 void recalc_sigpending_and_wake(struct task_struct *t)
134 if (recalc_sigpending_tsk(t))
135 signal_wake_up(t, 0);
138 void recalc_sigpending(void)
140 if (unlikely(tracehook_force_sigpending()))
141 set_thread_flag(TIF_SIGPENDING);
142 else if (!recalc_sigpending_tsk(current) && !freezing(current))
143 clear_thread_flag(TIF_SIGPENDING);
147 /* Given the mask, find the first available signal that should be serviced. */
149 int next_signal(struct sigpending *pending, sigset_t *mask)
151 unsigned long i, *s, *m, x;
154 s = pending->signal.sig;
156 switch (_NSIG_WORDS) {
158 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
159 if ((x = *s &~ *m) != 0) {
160 sig = ffz(~x) + i*_NSIG_BPW + 1;
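/*
 * Worked example: if SIGTERM (15) is the only signal that is pending and
 * not masked, word i == 0 has only bit 14 set in x, so ffz(~x) == 14 and
 * sig == 14 + 0*_NSIG_BPW + 1 == 15.
 */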
165 case 2: if ((x = s[0] &~ m[0]) != 0)
167 else if ((x = s[1] &~ m[1]) != 0)
174 case 1: if ((x = *s &~ *m) != 0)
182 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
185 struct sigqueue *q = NULL;
186 struct user_struct *user;
189 * In order to avoid problems with "switch_user()", we want to make
190 * sure that the compiler doesn't re-load "t->user"
194 atomic_inc(&user->sigpending);
195 if (override_rlimit ||
196 atomic_read(&user->sigpending) <=
197 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
198 q = kmem_cache_alloc(sigqueue_cachep, flags);
199 if (unlikely(q == NULL)) {
200 atomic_dec(&user->sigpending);
202 INIT_LIST_HEAD(&q->list);
204 q->user = get_uid(user);
209 static void __sigqueue_free(struct sigqueue *q)
211 if (q->flags & SIGQUEUE_PREALLOC)
213 atomic_dec(&q->user->sigpending);
215 kmem_cache_free(sigqueue_cachep, q);
218 void flush_sigqueue(struct sigpending *queue)
222 sigemptyset(&queue->signal);
223 while (!list_empty(&queue->list)) {
224 q = list_entry(queue->list.next, struct sigqueue , list);
225 list_del_init(&q->list);
231 * Flush all pending signals for a task.
233 void flush_signals(struct task_struct *t)
237 spin_lock_irqsave(&t->sighand->siglock, flags);
238 clear_tsk_thread_flag(t, TIF_SIGPENDING);
239 flush_sigqueue(&t->pending);
240 flush_sigqueue(&t->signal->shared_pending);
241 spin_unlock_irqrestore(&t->sighand->siglock, flags);
244 static void __flush_itimer_signals(struct sigpending *pending)
246 sigset_t signal, retain;
247 struct sigqueue *q, *n;
249 signal = pending->signal;
250 sigemptyset(&retain);
252 list_for_each_entry_safe(q, n, &pending->list, list) {
253 int sig = q->info.si_signo;
255 if (likely(q->info.si_code != SI_TIMER)) {
256 sigaddset(&retain, sig);
258 sigdelset(&signal, sig);
259 list_del_init(&q->list);
264 sigorsets(&pending->signal, &signal, &retain);
267 void flush_itimer_signals(void)
269 struct task_struct *tsk = current;
272 spin_lock_irqsave(&tsk->sighand->siglock, flags);
273 __flush_itimer_signals(&tsk->pending);
274 __flush_itimer_signals(&tsk->signal->shared_pending);
275 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
278 void ignore_signals(struct task_struct *t)
282 for (i = 0; i < _NSIG; ++i)
283 t->sighand->action[i].sa.sa_handler = SIG_IGN;
289 * Flush all handlers for a task.
293 flush_signal_handlers(struct task_struct *t, int force_default)
296 struct k_sigaction *ka = &t->sighand->action[0];
297 for (i = _NSIG ; i != 0 ; i--) {
298 if (force_default || ka->sa.sa_handler != SIG_IGN)
299 ka->sa.sa_handler = SIG_DFL;
301 sigemptyset(&ka->sa.sa_mask);
306 int unhandled_signal(struct task_struct *tsk, int sig)
308 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
309 if (is_global_init(tsk))
311 if (handler != SIG_IGN && handler != SIG_DFL)
313 return !tracehook_consider_fatal_signal(tsk, sig, handler);
317 /* Notify the system that a driver wants to block all signals for this
318 * process, and wants to be notified if any signals at all were to be
319 * sent/acted upon. If the notifier routine returns non-zero, then the
320 * signal will be acted upon after all. If the notifier routine returns 0,
321 * then the signal will be blocked. Only one block per process is
322 * allowed. priv is a pointer to private data that the notifier routine
323 * can use to determine if the signal should be blocked or not. */
326 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
330 spin_lock_irqsave(&current->sighand->siglock, flags);
331 current->notifier_mask = mask;
332 current->notifier_data = priv;
333 current->notifier = notifier;
334 spin_unlock_irqrestore(&current->sighand->siglock, flags);
337 /* Notify the system that blocking has ended. */
340 unblock_all_signals(void)
344 spin_lock_irqsave(&current->sighand->siglock, flags);
345 current->notifier = NULL;
346 current->notifier_data = NULL;
348 spin_unlock_irqrestore(&current->sighand->siglock, flags);
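
/*
 * Minimal usage sketch for the notifier interface above, assuming a
 * hypothetical driver with state "struct my_dev": the notifier returns
 * non-zero to let a signal be acted upon after all, 0 to keep it blocked.
 */
#if 0
static int my_notifier(void *priv)
{
	struct my_dev *dev = priv;		/* hypothetical driver state */

	return dev->allow_signals;		/* non-zero: deliver after all */
}

static void my_critical_window(struct my_dev *dev)
{
	sigset_t mask;

	sigfillset(&mask);			/* be consulted about every signal */
	block_all_signals(my_notifier, dev, &mask);
	/* ... work that must not be disturbed by signal delivery ... */
	unblock_all_signals();
}
#endif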
351 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
353 struct sigqueue *q, *first = NULL;
356 * Collect the siginfo appropriate to this signal. Check if
357 * there is another siginfo for the same signal.
359 list_for_each_entry(q, &list->list, list) {
360 if (q->info.si_signo == sig) {
367 sigdelset(&list->signal, sig);
371 list_del_init(&first->list);
372 copy_siginfo(info, &first->info);
373 __sigqueue_free(first);
375 /* Ok, it wasn't in the queue. This must be
376 a fast-pathed signal or we must have been
377 out of queue space. So zero out the info.
379 info->si_signo = sig;
387 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
390 int sig = next_signal(pending, mask);
393 if (current->notifier) {
394 if (sigismember(current->notifier_mask, sig)) {
395 if (!(current->notifier)(current->notifier_data)) {
396 clear_thread_flag(TIF_SIGPENDING);
402 collect_signal(sig, pending, info);
409 * Dequeue a signal and return the element to the caller, which is
410 * expected to free it.
412 * All callers have to hold the siglock.
414 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
418 /* We only dequeue private signals from ourselves, we don't let
419 * signalfd steal them
421 signr = __dequeue_signal(&tsk->pending, mask, info);
423 signr = __dequeue_signal(&tsk->signal->shared_pending,
428 * itimers are process shared and we restart periodic
429 * itimers in the signal delivery path to prevent DoS
430 * attacks in the high resolution timer case. This is
431 * compliant with the old way of self restarting
432 * itimers, as the SIGALRM is a legacy signal and only
433 * queued once. Changing the restart behaviour to
434 * restart the timer in the signal dequeue path is
435 * reducing the timer noise on heavily loaded !highres systems.
438 if (unlikely(signr == SIGALRM)) {
439 struct hrtimer *tmr = &tsk->signal->real_timer;
441 if (!hrtimer_is_queued(tmr) &&
442 tsk->signal->it_real_incr.tv64 != 0) {
443 hrtimer_forward(tmr, tmr->base->get_time(),
444 tsk->signal->it_real_incr);
445 hrtimer_restart(tmr);
454 if (unlikely(sig_kernel_stop(signr))) {
456 * Set a marker that we have dequeued a stop signal. Our
457 * caller might release the siglock and then the pending
458 * stop signal it is about to process is no longer in the
459 * pending bitmasks, but must still be cleared by a SIGCONT
460 * (and overruled by a SIGKILL). So those cases clear this
461 * shared flag after we've set it. Note that this flag may
462 * remain set after the signal we return is ignored or
463 * handled. That doesn't matter because its only purpose
464 * is to alert stop-signal processing code when another
465 * processor has come along and cleared the flag.
467 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
469 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
471 * Release the siglock to ensure proper locking order
472 * of timer locks outside of siglocks. Note, we leave
473 * irqs disabled here, since the posix-timers code is
474 * about to disable them again anyway.
476 spin_unlock(&tsk->sighand->siglock);
477 do_schedule_next_timer(info);
478 spin_lock(&tsk->sighand->siglock);
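
/*
 * Caller-pattern sketch, assuming the caller is current and that signr and
 * info are declared by the caller: dequeue_signal() must run under the
 * siglock, as done for real in sys_rt_sigtimedwait() further below.
 */
#if 0
spin_lock_irq(&current->sighand->siglock);
signr = dequeue_signal(current, &current->blocked, &info);
spin_unlock_irq(&current->sighand->siglock);
#endif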
484 * Tell a process that it has a new active signal..
486 * NOTE! we rely on the previous spin_lock to
487 * lock interrupts for us! We can only be called with
488 * "siglock" held, and the local interrupt must
489 * have been disabled when that got acquired!
491 * No need to set need_resched since signal event passing
492 * goes through ->blocked
494 void signal_wake_up(struct task_struct *t, int resume)
498 set_tsk_thread_flag(t, TIF_SIGPENDING);
501 * For SIGKILL, we want to wake it up in the stopped/traced/killable
502 * case. We don't check t->state here because there is a race with it
503 * executing on another processor and just now entering stopped state.
504 * By using wake_up_state, we ensure the process will wake up and
505 * handle its death signal.
507 mask = TASK_INTERRUPTIBLE;
509 mask |= TASK_WAKEKILL;
510 if (!wake_up_state(t, mask))
515 * Remove signals in mask from the pending set and queue.
516 * Returns 1 if any signals were found.
518 * All callers must be holding the siglock.
520 * This version takes a sigset mask and looks at all signals,
521 * not just those in the first mask word.
523 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
525 struct sigqueue *q, *n;
528 sigandsets(&m, mask, &s->signal);
529 if (sigisemptyset(&m))
532 signandsets(&s->signal, &s->signal, mask);
533 list_for_each_entry_safe(q, n, &s->list, list) {
534 if (sigismember(mask, q->info.si_signo)) {
535 list_del_init(&q->list);
542 * Remove signals in mask from the pending set and queue.
543 * Returns 1 if any signals were found.
545 * All callers must be holding the siglock.
547 static int rm_from_queue(unsigned long mask, struct sigpending *s)
549 struct sigqueue *q, *n;
551 if (!sigtestsetmask(&s->signal, mask))
554 sigdelsetmask(&s->signal, mask);
555 list_for_each_entry_safe(q, n, &s->list, list) {
556 if (q->info.si_signo < SIGRTMIN &&
557 (mask & sigmask(q->info.si_signo))) {
558 list_del_init(&q->list);
566 * Bad permissions for sending the signal
568 static int check_kill_permission(int sig, struct siginfo *info,
569 struct task_struct *t)
574 if (!valid_signal(sig))
577 if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
580 error = audit_signal_info(sig, t); /* Let audit system see the signal */
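/*
 * Note: "a ^ b" below is non-zero exactly when a != b, so the check fires
 * only if the sender's uid and euid match neither the target's uid nor its
 * suid and the sender lacks CAP_KILL.
 */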
584 if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
585 (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
586 !capable(CAP_KILL)) {
589 sid = task_session(t);
591 * We don't return the error if sid == NULL. The
592 * task was unhashed, the caller must notice this.
594 if (!sid || sid == task_session(current))
601 return security_task_kill(t, info, sig, 0);
605 * Handle magic process-wide effects of stop/continue signals. Unlike
606 * the signal actions, these happen immediately at signal-generation
607 * time regardless of blocking, ignoring, or handling. This does the
608 * actual continuing for SIGCONT, but not the actual stopping for stop
609 * signals. The process stop is done as a signal action for SIG_DFL.
611 * Returns true if the signal should be actually delivered, otherwise
612 * it should be dropped.
614 static int prepare_signal(int sig, struct task_struct *p)
616 struct signal_struct *signal = p->signal;
617 struct task_struct *t;
619 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
621 * The process is in the middle of dying, nothing to do.
623 } else if (sig_kernel_stop(sig)) {
625 * This is a stop signal. Remove SIGCONT from all queues.
627 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
630 rm_from_queue(sigmask(SIGCONT), &t->pending);
631 } while_each_thread(p, t);
632 } else if (sig == SIGCONT) {
635 * Remove all stop signals from all queues,
636 * and wake all threads.
638 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
642 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
644 * If there is a handler for SIGCONT, we must make
645 * sure that no thread returns to user mode before
646 * we post the signal, in case it was the only
647 * thread eligible to run the signal handler--then
648 * it must not do anything between resuming and
649 * running the handler. With the TIF_SIGPENDING
650 * flag set, the thread will pause and acquire the
651 * siglock that we hold now and until we've queued
652 * the pending signal.
654 * Wake up the stopped thread _after_ setting TIF_SIGPENDING.
657 state = __TASK_STOPPED;
658 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
659 set_tsk_thread_flag(t, TIF_SIGPENDING);
660 state |= TASK_INTERRUPTIBLE;
662 wake_up_state(t, state);
663 } while_each_thread(p, t);
666 * Notify the parent with CLD_CONTINUED if we were stopped.
668 * If we were in the middle of a group stop, we pretend it
669 * was already finished, and then continued. Since SIGCHLD
670 * doesn't queue we report only CLD_STOPPED, as if the next
671 * CLD_CONTINUED was dropped.
674 if (signal->flags & SIGNAL_STOP_STOPPED)
675 why |= SIGNAL_CLD_CONTINUED;
676 else if (signal->group_stop_count)
677 why |= SIGNAL_CLD_STOPPED;
681 * The first thread which returns from finish_stop()
682 * will take ->siglock, notice SIGNAL_CLD_MASK, and
683 * notify its parent. See get_signal_to_deliver().
685 signal->flags = why | SIGNAL_STOP_CONTINUED;
686 signal->group_stop_count = 0;
687 signal->group_exit_code = 0;
690 * We are not stopped, but there could be a stop
691 * signal in the middle of being processed after
692 * being removed from the queue. Clear that too.
694 signal->flags &= ~SIGNAL_STOP_DEQUEUED;
698 return !sig_ignored(p, sig);
702 * Test if P wants to take SIG. After we've checked all threads with this,
703 * it's equivalent to finding no threads not blocking SIG. Any threads not
704 * blocking SIG were ruled out because they are not running and already
705 * have pending signals. Such threads will dequeue from the shared queue
706 * as soon as they're available, so putting the signal on the shared queue
707 * will be equivalent to sending it to one such thread.
709 static inline int wants_signal(int sig, struct task_struct *p)
711 if (sigismember(&p->blocked, sig))
713 if (p->flags & PF_EXITING)
717 if (task_is_stopped_or_traced(p))
719 return task_curr(p) || !signal_pending(p);
722 static void complete_signal(int sig, struct task_struct *p, int group)
724 struct signal_struct *signal = p->signal;
725 struct task_struct *t;
728 * Now find a thread we can wake up to take the signal off the queue.
730 * If the main thread wants the signal, it gets first crack.
731 * Probably the least surprising to the average bear.
733 if (wants_signal(sig, p))
735 else if (!group || thread_group_empty(p))
737 * There is just one thread and it does not need to be woken.
738 * It will dequeue unblocked signals before it runs again.
743 * Otherwise try to find a suitable thread.
745 t = signal->curr_target;
746 while (!wants_signal(sig, t)) {
748 if (t == signal->curr_target)
750 * No thread needs to be woken.
751 * Any eligible threads will see
752 * the signal in the queue soon.
756 signal->curr_target = t;
760 * Found a killable thread. If the signal will be fatal,
761 * then start taking the whole group down immediately.
763 if (sig_fatal(p, sig) &&
764 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
765 !sigismember(&t->real_blocked, sig) &&
767 !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
769 * This signal will be fatal to the whole group.
771 if (!sig_kernel_coredump(sig)) {
773 * Start a group exit and wake everybody up.
774 * This way we don't have other threads
775 * running and doing things after a slower
776 * thread has the fatal signal pending.
778 signal->flags = SIGNAL_GROUP_EXIT;
779 signal->group_exit_code = sig;
780 signal->group_stop_count = 0;
783 sigaddset(&t->pending.signal, SIGKILL);
784 signal_wake_up(t, 1);
785 } while_each_thread(p, t);
791 * The signal is already in the shared-pending queue.
792 * Tell the chosen thread to wake up and dequeue it.
794 signal_wake_up(t, sig == SIGKILL);
798 static inline int legacy_queue(struct sigpending *signals, int sig)
800 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
803 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
806 struct sigpending *pending;
809 trace_sched_signal_send(sig, t);
811 assert_spin_locked(&t->sighand->siglock);
812 if (!prepare_signal(sig, t))
815 pending = group ? &t->signal->shared_pending : &t->pending;
817 * Short-circuit ignored signals and support queuing
818 * exactly one non-rt signal, so that we can get more
819 * detailed information about the cause of the signal.
821 if (legacy_queue(pending, sig))
824 * fast-pathed signals for kernel-internal things like SIGSTOP or SIGKILL.
827 if (info == SEND_SIG_FORCED)
830 /* Real-time signals must be queued if sent by sigqueue, or
831 some other real-time mechanism. It is implementation
832 defined whether kill() does so. We attempt to do so, on
833 the principle of least surprise, but since kill is not
834 allowed to fail with EAGAIN when low on memory we just
835 make sure at least one signal gets delivered and don't
836 pass on the info struct. */
838 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
839 (is_si_special(info) ||
840 info->si_code >= 0)));
842 list_add_tail(&q->list, &pending->list);
843 switch ((unsigned long) info) {
844 case (unsigned long) SEND_SIG_NOINFO:
845 q->info.si_signo = sig;
846 q->info.si_errno = 0;
847 q->info.si_code = SI_USER;
848 q->info.si_pid = task_pid_vnr(current);
849 q->info.si_uid = current->uid;
851 case (unsigned long) SEND_SIG_PRIV:
852 q->info.si_signo = sig;
853 q->info.si_errno = 0;
854 q->info.si_code = SI_KERNEL;
859 copy_siginfo(&q->info, info);
862 } else if (!is_si_special(info)) {
863 if (sig >= SIGRTMIN && info->si_code != SI_USER)
865 * Queue overflow, abort. We may abort if the signal was rt
866 * and sent by user using something other than kill().
872 signalfd_notify(t, sig);
873 sigaddset(&pending->signal, sig);
874 complete_signal(sig, t, group);
878 int print_fatal_signals;
880 static void print_fatal_signal(struct pt_regs *regs, int signr)
882 printk("%s/%d: potentially unexpected fatal signal %d.\n",
883 current->comm, task_pid_nr(current), signr);
885 #if defined(__i386__) && !defined(__arch_um__)
886 printk("code at %08lx: ", regs->ip);
889 for (i = 0; i < 16; i++) {
892 __get_user(insn, (unsigned char *)(regs->ip + i));
893 printk("%02x ", insn);
901 static int __init setup_print_fatal_signals(char *str)
903 get_option (&str, &print_fatal_signals);
908 __setup("print-fatal-signals=", setup_print_fatal_signals);
911 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
913 return send_signal(sig, info, p, 1);
917 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
919 return send_signal(sig, info, t, 0);
923 * Force a signal that the process can't ignore: if necessary
924 * we unblock the signal and change any SIG_IGN to SIG_DFL.
926 * Note: If we unblock the signal, we always reset it to SIG_DFL,
927 * since we do not want to have a signal handler that was blocked
928 * be invoked when user space had explicitly blocked it.
930 * We don't want to have recursive SIGSEGV's etc, for example,
931 * that is why we also clear SIGNAL_UNKILLABLE.
934 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
936 unsigned long int flags;
937 int ret, blocked, ignored;
938 struct k_sigaction *action;
940 spin_lock_irqsave(&t->sighand->siglock, flags);
941 action = &t->sighand->action[sig-1];
942 ignored = action->sa.sa_handler == SIG_IGN;
943 blocked = sigismember(&t->blocked, sig);
944 if (blocked || ignored) {
945 action->sa.sa_handler = SIG_DFL;
947 sigdelset(&t->blocked, sig);
948 recalc_sigpending_and_wake(t);
951 if (action->sa.sa_handler == SIG_DFL)
952 t->signal->flags &= ~SIGNAL_UNKILLABLE;
953 ret = specific_send_sig_info(sig, info, t);
954 spin_unlock_irqrestore(&t->sighand->siglock, flags);
960 force_sig_specific(int sig, struct task_struct *t)
962 force_sig_info(sig, SEND_SIG_FORCED, t);
966 * Nuke all other threads in the group.
968 void zap_other_threads(struct task_struct *p)
970 struct task_struct *t;
972 p->signal->group_stop_count = 0;
974 for (t = next_thread(p); t != p; t = next_thread(t)) {
976 * Don't bother with already dead threads
981 /* SIGKILL will be handled before any pending SIGSTOP */
982 sigaddset(&t->pending.signal, SIGKILL);
983 signal_wake_up(t, 1);
987 int __fatal_signal_pending(struct task_struct *tsk)
989 return sigismember(&tsk->pending.signal, SIGKILL);
991 EXPORT_SYMBOL(__fatal_signal_pending);
993 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
995 struct sighand_struct *sighand;
999 sighand = rcu_dereference(tsk->sighand);
1000 if (unlikely(sighand == NULL))
1003 spin_lock_irqsave(&sighand->siglock, *flags);
1004 if (likely(sighand == tsk->sighand))
1006 spin_unlock_irqrestore(&sighand->siglock, *flags);
1013 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1015 unsigned long flags;
1018 ret = check_kill_permission(sig, info, p);
1022 if (lock_task_sighand(p, &flags)) {
1023 ret = __group_send_sig_info(sig, info, p);
1024 unlock_task_sighand(p, &flags);
1032 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1033 * control characters do (^C, ^Z etc)
1036 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1038 struct task_struct *p = NULL;
1039 int retval, success;
1043 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1044 int err = group_send_sig_info(sig, info, p);
1047 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1048 return success ? 0 : retval;
1051 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1054 struct task_struct *p;
1058 p = pid_task(pid, PIDTYPE_PID);
1060 error = group_send_sig_info(sig, info, p);
1061 if (unlikely(error == -ESRCH))
1063 * The task was unhashed in between, try again.
1064 * If it is dead, pid_task() will return NULL,
1065 * if we race with de_thread() it will find the new leader.
1076 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1080 error = kill_pid_info(sig, info, find_vpid(pid));
1085 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1086 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1087 uid_t uid, uid_t euid, u32 secid)
1090 struct task_struct *p;
1092 if (!valid_signal(sig))
1095 read_lock(&tasklist_lock);
1096 p = pid_task(pid, PIDTYPE_PID);
1101 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1102 && (euid != p->suid) && (euid != p->uid)
1103 && (uid != p->suid) && (uid != p->uid)) {
1107 ret = security_task_kill(p, info, sig, secid);
1110 if (sig && p->sighand) {
1111 unsigned long flags;
1112 spin_lock_irqsave(&p->sighand->siglock, flags);
1113 ret = __group_send_sig_info(sig, info, p);
1114 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1117 read_unlock(&tasklist_lock);
1120 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1123 * kill_something_info() interprets pid in interesting ways just like kill(2).
1125 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1126 * is probably wrong. Should make it like BSD or SYSV.
1129 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1135 ret = kill_pid_info(sig, info, find_vpid(pid));
1140 read_lock(&tasklist_lock);
1142 ret = __kill_pgrp_info(sig, info,
1143 pid ? find_vpid(-pid) : task_pgrp(current));
1145 int retval = 0, count = 0;
1146 struct task_struct * p;
1148 for_each_process(p) {
1149 if (task_pid_vnr(p) > 1 &&
1150 !same_thread_group(p, current)) {
1151 int err = group_send_sig_info(sig, info, p);
1157 ret = count ? retval : -ESRCH;
1159 read_unlock(&tasklist_lock);
1165 * These are for backward compatibility with the rest of the kernel source.
1169 * The caller must ensure the task can't exit.
1172 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1175 unsigned long flags;
1178 * Make sure legacy kernel users don't send in bad values
1179 * (normal paths check this in check_kill_permission).
1181 if (!valid_signal(sig))
1184 spin_lock_irqsave(&p->sighand->siglock, flags);
1185 ret = specific_send_sig_info(sig, info, p);
1186 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1190 #define __si_special(priv) \
1191 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1194 send_sig(int sig, struct task_struct *p, int priv)
1196 return send_sig_info(sig, __si_special(priv), p);
1200 force_sig(int sig, struct task_struct *p)
1202 force_sig_info(sig, SEND_SIG_PRIV, p);
1206 * When things go south during signal handling, we
1207 * will force a SIGSEGV. And if the signal that caused
1208 * the problem was already a SIGSEGV, we'll want to
1209 * make sure we don't even try to deliver the signal..
1212 force_sigsegv(int sig, struct task_struct *p)
1214 if (sig == SIGSEGV) {
1215 unsigned long flags;
1216 spin_lock_irqsave(&p->sighand->siglock, flags);
1217 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1218 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1220 force_sig(SIGSEGV, p);
1224 int kill_pgrp(struct pid *pid, int sig, int priv)
1228 read_lock(&tasklist_lock);
1229 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1230 read_unlock(&tasklist_lock);
1234 EXPORT_SYMBOL(kill_pgrp);
1236 int kill_pid(struct pid *pid, int sig, int priv)
1238 return kill_pid_info(sig, __si_special(priv), pid);
1240 EXPORT_SYMBOL(kill_pid);
1243 * These functions support sending signals using preallocated sigqueue
1244 * structures. This is needed "because realtime applications cannot
1245 * afford to lose notifications of asynchronous events, like timer
1246 * expirations or I/O completions". In the case of Posix Timers
1247 * we allocate the sigqueue structure from the timer_create. If this
1248 * allocation fails we are able to report the failure to the application
1249 * with an EAGAIN error.
1252 struct sigqueue *sigqueue_alloc(void)
1256 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1257 q->flags |= SIGQUEUE_PREALLOC;
1261 void sigqueue_free(struct sigqueue *q)
1263 unsigned long flags;
1264 spinlock_t *lock = &current->sighand->siglock;
1266 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1268 * We must hold ->siglock while testing q->list
1269 * to serialize with collect_signal() or with
1270 * __exit_signal()->flush_sigqueue().
1272 spin_lock_irqsave(lock, flags);
1273 q->flags &= ~SIGQUEUE_PREALLOC;
1275 * If it is queued it will be freed when dequeued,
1276 * like the "regular" sigqueue.
1278 if (!list_empty(&q->list))
1280 spin_unlock_irqrestore(lock, flags);
1286 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1288 int sig = q->info.si_signo;
1289 struct sigpending *pending;
1290 unsigned long flags;
1293 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1296 if (!likely(lock_task_sighand(t, &flags)))
1299 ret = 1; /* the signal is ignored */
1300 if (!prepare_signal(sig, t))
1304 if (unlikely(!list_empty(&q->list))) {
1306 * If an SI_TIMER entry is already queued, just increment
1307 * the overrun count.
1309 BUG_ON(q->info.si_code != SI_TIMER);
1310 q->info.si_overrun++;
1313 q->info.si_overrun = 0;
1315 signalfd_notify(t, sig);
1316 pending = group ? &t->signal->shared_pending : &t->pending;
1317 list_add_tail(&q->list, &pending->list);
1318 sigaddset(&pending->signal, sig);
1319 complete_signal(sig, t, group);
1321 unlock_task_sighand(t, &flags);
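
/*
 * Usage sketch for the preallocated-sigqueue interface, mirroring the
 * posix-timers pattern; "tmr" and its target field are hypothetical names.
 * Preallocating at creation time means expiry-time delivery cannot fail
 * for lack of memory.
 */
#if 0
struct sigqueue *q = sigqueue_alloc();	/* at timer_create() time */

if (!q)
	return -EAGAIN;			/* report the failure up front */

q->info.si_signo = SIGALRM;
q->info.si_code = SI_TIMER;
send_sigqueue(q, tmr->target, 0);	/* at each timer expiry */

sigqueue_free(q);			/* at timer_delete() time */
#endif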
1327 * Wake up any threads in the parent blocked in wait* syscalls.
1329 static inline void __wake_up_parent(struct task_struct *p,
1330 struct task_struct *parent)
1332 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1336 * Let a parent know about the death of a child.
1337 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1339 * Returns -1 if our parent ignored us and so we've switched to
1340 * self-reaping, or else @sig.
1342 int do_notify_parent(struct task_struct *tsk, int sig)
1344 struct siginfo info;
1345 unsigned long flags;
1346 struct sighand_struct *psig;
1347 struct task_cputime cputime;
1352 /* do_notify_parent_cldstop should have been called instead. */
1353 BUG_ON(task_is_stopped_or_traced(tsk));
1355 BUG_ON(!tsk->ptrace &&
1356 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1358 info.si_signo = sig;
1361 * we are under tasklist_lock here so our parent is tied to
1362 * us and cannot exit and release its namespace.
1364 * the only thing it can do is switch its nsproxy with sys_unshare,
1365 * but unsharing pid namespaces is not allowed, so we'll always
1366 * see the relevant namespace.
1368 * write_lock() currently calls preempt_disable() which is the
1369 * same as rcu_read_lock(), but according to Oleg it is not
1370 * correct to rely on this.
1373 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1376 info.si_uid = tsk->uid;
1378 thread_group_cputime(tsk, &cputime);
1379 info.si_utime = cputime_to_jiffies(cputime.utime);
1380 info.si_stime = cputime_to_jiffies(cputime.stime);
1382 info.si_status = tsk->exit_code & 0x7f;
1383 if (tsk->exit_code & 0x80)
1384 info.si_code = CLD_DUMPED;
1385 else if (tsk->exit_code & 0x7f)
1386 info.si_code = CLD_KILLED;
1388 info.si_code = CLD_EXITED;
1389 info.si_status = tsk->exit_code >> 8;
1392 psig = tsk->parent->sighand;
1393 spin_lock_irqsave(&psig->siglock, flags);
1394 if (!tsk->ptrace && sig == SIGCHLD &&
1395 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1396 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1398 * We are exiting and our parent doesn't care. POSIX.1
1399 * defines special semantics for setting SIGCHLD to SIG_IGN
1400 * or setting the SA_NOCLDWAIT flag: we should be reaped
1401 * automatically and not left for our parent's wait4 call.
1402 * Rather than having the parent do it as a magic kind of
1403 * signal handler, we just set this to tell do_exit that we
1404 * can be cleaned up without becoming a zombie. Note that
1405 * we still call __wake_up_parent in this case, because a
1406 * blocked sys_wait4 might now return -ECHILD.
1408 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1409 * is implementation-defined: we do (if you don't want
1410 * it, just use SIG_IGN instead).
1412 ret = tsk->exit_signal = -1;
1413 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1416 if (valid_signal(sig) && sig > 0)
1417 __group_send_sig_info(sig, &info, tsk->parent);
1418 __wake_up_parent(tsk, tsk->parent);
1419 spin_unlock_irqrestore(&psig->siglock, flags);
1424 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1426 struct siginfo info;
1427 unsigned long flags;
1428 struct task_struct *parent;
1429 struct sighand_struct *sighand;
1431 if (tsk->ptrace & PT_PTRACED)
1432 parent = tsk->parent;
1434 tsk = tsk->group_leader;
1435 parent = tsk->real_parent;
1438 info.si_signo = SIGCHLD;
1441 * see comment in do_notify_parent() about the following 3 lines
1444 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1447 info.si_uid = tsk->uid;
1449 info.si_utime = cputime_to_clock_t(tsk->utime);
1450 info.si_stime = cputime_to_clock_t(tsk->stime);
1455 info.si_status = SIGCONT;
1458 info.si_status = tsk->signal->group_exit_code & 0x7f;
1461 info.si_status = tsk->exit_code & 0x7f;
1467 sighand = parent->sighand;
1468 spin_lock_irqsave(&sighand->siglock, flags);
1469 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1470 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1471 __group_send_sig_info(SIGCHLD, &info, parent);
1473 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1475 __wake_up_parent(tsk, parent);
1476 spin_unlock_irqrestore(&sighand->siglock, flags);
1479 static inline int may_ptrace_stop(void)
1481 if (!likely(current->ptrace & PT_PTRACED))
1484 * Are we in the middle of do_coredump?
1485 * If so and our tracer is also part of the coredump stopping
1486 * is a deadlock situation, and pointless because our tracer
1487 * is dead so don't allow us to stop.
1488 * If SIGKILL was already sent before the caller unlocked
1489 * ->siglock we must see ->core_state != NULL. Otherwise it
1490 * is safe to enter schedule().
1492 if (unlikely(current->mm->core_state) &&
1493 unlikely(current->mm == current->parent->mm))
1500 * Return nonzero if there is a SIGKILL that should be waking us up.
1501 * Called with the siglock held.
1503 static int sigkill_pending(struct task_struct *tsk)
1505 return sigismember(&tsk->pending.signal, SIGKILL) ||
1506 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1510 * This must be called with current->sighand->siglock held.
1512 * This should be the path for all ptrace stops.
1513 * We always set current->last_siginfo while stopped here.
1514 * That makes it a way to test a stopped process for
1515 * being ptrace-stopped vs being job-control-stopped.
1517 * If we actually decide not to stop at all because the tracer
1518 * is gone, we keep current->exit_code unless clear_code.
1520 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1522 if (arch_ptrace_stop_needed(exit_code, info)) {
1524 * The arch code has something special to do before a
1525 * ptrace stop. This is allowed to block, e.g. for faults
1526 * on user stack pages. We can't keep the siglock while
1527 * calling arch_ptrace_stop, so we must release it now.
1528 * To preserve proper semantics, we must do this before
1529 * any signal bookkeeping like checking group_stop_count.
1530 * Meanwhile, a SIGKILL could come in before we retake the
1531 * siglock. That must prevent us from sleeping in TASK_TRACED.
1532 * So after regaining the lock, we must check for SIGKILL.
1534 spin_unlock_irq(&current->sighand->siglock);
1535 arch_ptrace_stop(exit_code, info);
1536 spin_lock_irq(&current->sighand->siglock);
1537 if (sigkill_pending(current))
1542 * If there is a group stop in progress,
1543 * we must participate in the bookkeeping.
1545 if (current->signal->group_stop_count > 0)
1546 --current->signal->group_stop_count;
1548 current->last_siginfo = info;
1549 current->exit_code = exit_code;
1551 /* Let the debugger run. */
1552 __set_current_state(TASK_TRACED);
1553 spin_unlock_irq(&current->sighand->siglock);
1554 read_lock(&tasklist_lock);
1555 if (may_ptrace_stop()) {
1556 do_notify_parent_cldstop(current, CLD_TRAPPED);
1557 read_unlock(&tasklist_lock);
1561 * By the time we got the lock, our tracer went away.
1562 * Don't drop the lock yet, another tracer may come.
1564 __set_current_state(TASK_RUNNING);
1566 current->exit_code = 0;
1567 read_unlock(&tasklist_lock);
1571 * While in TASK_TRACED, we were considered "frozen enough".
1572 * Now that we woke up, it's crucial if we're supposed to be
1573 * frozen that we freeze now before running anything substantial.
1578 * We are back. Now reacquire the siglock before touching
1579 * last_siginfo, so that we are sure to have synchronized with
1580 * any signal-sending on another CPU that wants to examine it.
1582 spin_lock_irq(&current->sighand->siglock);
1583 current->last_siginfo = NULL;
1586 * Queued signals ignored us while we were stopped for tracing.
1587 * So check for any that we should take before resuming user mode.
1588 * This sets TIF_SIGPENDING, but never clears it.
1590 recalc_sigpending_tsk(current);
1593 void ptrace_notify(int exit_code)
1597 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1599 memset(&info, 0, sizeof info);
1600 info.si_signo = SIGTRAP;
1601 info.si_code = exit_code;
1602 info.si_pid = task_pid_vnr(current);
1603 info.si_uid = current->uid;
1605 /* Let the debugger run. */
1606 spin_lock_irq(&current->sighand->siglock);
1607 ptrace_stop(exit_code, 1, &info);
1608 spin_unlock_irq(&current->sighand->siglock);
1612 finish_stop(int stop_count)
1615 * If there are no other threads in the group, or if there is
1616 * a group stop in progress and we are the last to stop,
1617 * report to the parent. When ptraced, every thread reports itself.
1619 if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1620 read_lock(&tasklist_lock);
1621 do_notify_parent_cldstop(current, CLD_STOPPED);
1622 read_unlock(&tasklist_lock);
1627 } while (try_to_freeze());
1629 * Now we don't run again until continued.
1631 current->exit_code = 0;
1635 * This performs the stopping for SIGSTOP and other stop signals.
1636 * We have to stop all threads in the thread group.
1637 * Returns nonzero if we've actually stopped and released the siglock.
1638 * Returns zero if we didn't stop and still hold the siglock.
1640 static int do_signal_stop(int signr)
1642 struct signal_struct *sig = current->signal;
1645 if (sig->group_stop_count > 0) {
1647 * There is a group stop in progress. We don't need to
1648 * start another one.
1650 stop_count = --sig->group_stop_count;
1652 struct task_struct *t;
1654 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1655 unlikely(signal_group_exit(sig)))
1658 * There is no group stop already in progress.
1659 * We must initiate one now.
1661 sig->group_exit_code = signr;
1664 for (t = next_thread(current); t != current; t = next_thread(t))
1666 * Setting state to TASK_STOPPED for a group
1667 * stop is always done with the siglock held,
1668 * so this check has no races.
1670 if (!(t->flags & PF_EXITING) &&
1671 !task_is_stopped_or_traced(t)) {
1673 signal_wake_up(t, 0);
1675 sig->group_stop_count = stop_count;
1678 if (stop_count == 0)
1679 sig->flags = SIGNAL_STOP_STOPPED;
1680 current->exit_code = sig->group_exit_code;
1681 __set_current_state(TASK_STOPPED);
1683 spin_unlock_irq(&current->sighand->siglock);
1684 finish_stop(stop_count);
1688 static int ptrace_signal(int signr, siginfo_t *info,
1689 struct pt_regs *regs, void *cookie)
1691 if (!(current->ptrace & PT_PTRACED))
1694 ptrace_signal_deliver(regs, cookie);
1696 /* Let the debugger run. */
1697 ptrace_stop(signr, 0, info);
1699 /* We're back. Did the debugger cancel the sig? */
1700 signr = current->exit_code;
1704 current->exit_code = 0;
1706 /* Update the siginfo structure if the signal has
1707 changed. If the debugger wanted something
1708 specific in the siginfo structure then it should
1709 have updated *info via PTRACE_SETSIGINFO. */
1710 if (signr != info->si_signo) {
1711 info->si_signo = signr;
1713 info->si_code = SI_USER;
1714 info->si_pid = task_pid_vnr(current->parent);
1715 info->si_uid = current->parent->uid;
1718 /* If the (new) signal is now blocked, requeue it. */
1719 if (sigismember(&current->blocked, signr)) {
1720 specific_send_sig_info(signr, info, current);
1727 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1728 struct pt_regs *regs, void *cookie)
1730 struct sighand_struct *sighand = current->sighand;
1731 struct signal_struct *signal = current->signal;
1736 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1737 * While in TASK_STOPPED, we were considered "frozen enough".
1738 * Now that we woke up, it's crucial if we're supposed to be
1739 * frozen that we freeze now before running anything substantial.
1743 spin_lock_irq(&sighand->siglock);
1745 * Every stopped thread goes here after wakeup. Check to see if
1746 * we should notify the parent, prepare_signal(SIGCONT) encodes
1747 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1749 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1750 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1751 ? CLD_CONTINUED : CLD_STOPPED;
1752 signal->flags &= ~SIGNAL_CLD_MASK;
1753 spin_unlock_irq(&sighand->siglock);
1755 if (unlikely(!tracehook_notify_jctl(1, why)))
1758 read_lock(&tasklist_lock);
1759 do_notify_parent_cldstop(current->group_leader, why);
1760 read_unlock(&tasklist_lock);
1765 struct k_sigaction *ka;
1767 if (unlikely(signal->group_stop_count > 0) &&
1772 * Tracing can induce an artificial signal and choose sigaction.
1773 * The return value in @signr determines the default action,
1774 * but @info->si_signo is the signal number we will report.
1776 signr = tracehook_get_signal(current, regs, info, return_ka);
1777 if (unlikely(signr < 0))
1779 if (unlikely(signr != 0))
1782 signr = dequeue_signal(current, &current->blocked,
1786 break; /* will return 0 */
1788 if (signr != SIGKILL) {
1789 signr = ptrace_signal(signr, info,
1795 ka = &sighand->action[signr-1];
1798 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1800 if (ka->sa.sa_handler != SIG_DFL) {
1801 /* Run the handler. */
1804 if (ka->sa.sa_flags & SA_ONESHOT)
1805 ka->sa.sa_handler = SIG_DFL;
1807 break; /* will return non-zero "signr" value */
1811 * Now we are doing the default action for this signal.
1813 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1817 * Global init gets no signals it doesn't want.
1819 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1820 !signal_group_exit(signal))
1823 if (sig_kernel_stop(signr)) {
1825 * The default action is to stop all threads in
1826 * the thread group. The job control signals
1827 * do nothing in an orphaned pgrp, but SIGSTOP
1828 * always works. Note that siglock needs to be
1829 * dropped during the call to is_orphaned_pgrp()
1830 * because of lock ordering with tasklist_lock.
1831 * This allows an intervening SIGCONT to be posted.
1832 * We need to check for that and bail out if necessary.
1834 if (signr != SIGSTOP) {
1835 spin_unlock_irq(&sighand->siglock);
1837 /* signals can be posted during this window */
1839 if (is_current_pgrp_orphaned())
1842 spin_lock_irq(&sighand->siglock);
1845 if (likely(do_signal_stop(info->si_signo))) {
1846 /* It released the siglock. */
1851 * We didn't actually stop, due to a race
1852 * with SIGCONT or something like that.
1857 spin_unlock_irq(&sighand->siglock);
1860 * Anything else is fatal, maybe with a core dump.
1862 current->flags |= PF_SIGNALED;
1864 if (sig_kernel_coredump(signr)) {
1865 if (print_fatal_signals)
1866 print_fatal_signal(regs, info->si_signo);
1868 * If it was able to dump core, this kills all
1869 * other threads in the group and synchronizes with
1870 * their demise. If we lost the race with another
1871 * thread getting here, it set group_exit_code
1872 * first and our do_group_exit call below will use
1873 * that value and ignore the one we pass it.
1875 do_coredump(info->si_signo, info->si_signo, regs);
1879 * Death signals, no core dump.
1881 do_group_exit(info->si_signo);
1884 spin_unlock_irq(&sighand->siglock);
1888 void exit_signals(struct task_struct *tsk)
1891 struct task_struct *t;
1893 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1894 tsk->flags |= PF_EXITING;
1898 spin_lock_irq(&tsk->sighand->siglock);
1900 * From now this task is not visible for group-wide signals,
1901 * see wants_signal(), do_signal_stop().
1903 tsk->flags |= PF_EXITING;
1904 if (!signal_pending(tsk))
1907 /* It could be that __group_complete_signal() chose us to
1908 * notify about a group-wide signal. Another thread should be
1909 * woken now to take the signal since we will not.
1911 for (t = tsk; (t = next_thread(t)) != tsk; )
1912 if (!signal_pending(t) && !(t->flags & PF_EXITING))
1913 recalc_sigpending_and_wake(t);
1915 if (unlikely(tsk->signal->group_stop_count) &&
1916 !--tsk->signal->group_stop_count) {
1917 tsk->signal->flags = SIGNAL_STOP_STOPPED;
1921 spin_unlock_irq(&tsk->sighand->siglock);
1923 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1924 read_lock(&tasklist_lock);
1925 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1926 read_unlock(&tasklist_lock);
1930 EXPORT_SYMBOL(recalc_sigpending);
1931 EXPORT_SYMBOL_GPL(dequeue_signal);
1932 EXPORT_SYMBOL(flush_signals);
1933 EXPORT_SYMBOL(force_sig);
1934 EXPORT_SYMBOL(send_sig);
1935 EXPORT_SYMBOL(send_sig_info);
1936 EXPORT_SYMBOL(sigprocmask);
1937 EXPORT_SYMBOL(block_all_signals);
1938 EXPORT_SYMBOL(unblock_all_signals);
1942 * System call entry points.
1945 asmlinkage long sys_restart_syscall(void)
1947 struct restart_block *restart = &current_thread_info()->restart_block;
1948 return restart->fn(restart);
1951 long do_no_restart_syscall(struct restart_block *param)
1957 * We don't need to get the kernel lock - this is all local to this
1958 * particular thread.. (and that's good, because this is _heavily_
1959 * used by various programs)
1963 * This is also useful for kernel threads that want to temporarily
1964 * (or permanently) block certain signals.
1966 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1967 * interface happily blocks "unblockable" signals like SIGKILL
1970 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1974 spin_lock_irq(&current->sighand->siglock);
1976 *oldset = current->blocked;
1981 sigorsets(&current->blocked, &current->blocked, set);
1984 signandsets(&current->blocked, &current->blocked, set);
1987 current->blocked = *set;
1992 recalc_sigpending();
1993 spin_unlock_irq(&current->sighand->siglock);
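
/*
 * Sketch of the in-kernel interface, assuming a kernel thread that wants to
 * block everything, including SIGKILL -- something the user-mode
 * sys_sigprocmask()/sys_rt_sigprocmask() paths never allow.
 */
#if 0
sigset_t all;

sigfillset(&all);
sigprocmask(SIG_BLOCK, &all, NULL);
#endif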
1999 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2001 int error = -EINVAL;
2002 sigset_t old_set, new_set;
2004 /* XXX: Don't preclude handling different sized sigset_t's. */
2005 if (sigsetsize != sizeof(sigset_t))
2010 if (copy_from_user(&new_set, set, sizeof(*set)))
2012 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2014 error = sigprocmask(how, &new_set, &old_set);
2020 spin_lock_irq(&current->sighand->siglock);
2021 old_set = current->blocked;
2022 spin_unlock_irq(&current->sighand->siglock);
2026 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2034 long do_sigpending(void __user *set, unsigned long sigsetsize)
2036 long error = -EINVAL;
2039 if (sigsetsize > sizeof(sigset_t))
2042 spin_lock_irq(&current->sighand->siglock);
2043 sigorsets(&pending, &current->pending.signal,
2044 &current->signal->shared_pending.signal);
2045 spin_unlock_irq(&current->sighand->siglock);
2047 /* Outside the lock because only this thread touches it. */
2048 sigandsets(&pending, &current->blocked, &pending);
2051 if (!copy_to_user(set, &pending, sigsetsize))
2059 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2061 return do_sigpending(set, sigsetsize);
2064 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2066 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2070 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2072 if (from->si_code < 0)
2073 return __copy_to_user(to, from, sizeof(siginfo_t))
2076 * If you change siginfo_t structure, please be sure
2077 * this code is fixed accordingly.
2078 * Please remember to update the signalfd_copyinfo() function
2079 * inside fs/signalfd.c too, in case siginfo_t changes.
2080 * It should never copy any pad contained in the structure
2081 * to avoid security leaks, but must copy the generic
2082 * 3 ints plus the relevant union member.
2084 err = __put_user(from->si_signo, &to->si_signo);
2085 err |= __put_user(from->si_errno, &to->si_errno);
2086 err |= __put_user((short)from->si_code, &to->si_code);
2087 switch (from->si_code & __SI_MASK) {
2089 err |= __put_user(from->si_pid, &to->si_pid);
2090 err |= __put_user(from->si_uid, &to->si_uid);
2093 err |= __put_user(from->si_tid, &to->si_tid);
2094 err |= __put_user(from->si_overrun, &to->si_overrun);
2095 err |= __put_user(from->si_ptr, &to->si_ptr);
2098 err |= __put_user(from->si_band, &to->si_band);
2099 err |= __put_user(from->si_fd, &to->si_fd);
2102 err |= __put_user(from->si_addr, &to->si_addr);
2103 #ifdef __ARCH_SI_TRAPNO
2104 err |= __put_user(from->si_trapno, &to->si_trapno);
2108 err |= __put_user(from->si_pid, &to->si_pid);
2109 err |= __put_user(from->si_uid, &to->si_uid);
2110 err |= __put_user(from->si_status, &to->si_status);
2111 err |= __put_user(from->si_utime, &to->si_utime);
2112 err |= __put_user(from->si_stime, &to->si_stime);
2114 case __SI_RT: /* This is not generated by the kernel as of now. */
2115 case __SI_MESGQ: /* But this is */
2116 err |= __put_user(from->si_pid, &to->si_pid);
2117 err |= __put_user(from->si_uid, &to->si_uid);
2118 err |= __put_user(from->si_ptr, &to->si_ptr);
2120 default: /* this is just in case for now ... */
2121 err |= __put_user(from->si_pid, &to->si_pid);
2122 err |= __put_user(from->si_uid, &to->si_uid);
2131 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2132 siginfo_t __user *uinfo,
2133 const struct timespec __user *uts,
2142 /* XXX: Don't preclude handling different sized sigset_t's. */
2143 if (sigsetsize != sizeof(sigset_t))
2146 if (copy_from_user(&these, uthese, sizeof(these)))
2150 * Invert the set of allowed signals to get those we
2153 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2157 if (copy_from_user(&ts, uts, sizeof(ts)))
2159 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2164 spin_lock_irq(&current->sighand->siglock);
2165 sig = dequeue_signal(current, &these, &info);
2167 timeout = MAX_SCHEDULE_TIMEOUT;
2169 timeout = (timespec_to_jiffies(&ts)
2170 + (ts.tv_sec || ts.tv_nsec));
2173 /* None ready -- temporarily unblock those we're
2174 * interested in while we are sleeping, so that we'll
2175 * be awakened when they arrive. */
2176 current->real_blocked = current->blocked;
2177 sigandsets(&current->blocked, &current->blocked, &these);
2178 recalc_sigpending();
2179 spin_unlock_irq(&current->sighand->siglock);
2181 timeout = schedule_timeout_interruptible(timeout);
2183 spin_lock_irq(&current->sighand->siglock);
2184 sig = dequeue_signal(current, &these, &info);
2185 current->blocked = current->real_blocked;
2186 siginitset(&current->real_blocked, 0);
2187 recalc_sigpending();
2190 spin_unlock_irq(&current->sighand->siglock);
2195 if (copy_siginfo_to_user(uinfo, &info))
2208 sys_kill(pid_t pid, int sig)
2210 struct siginfo info;
2212 info.si_signo = sig;
2214 info.si_code = SI_USER;
2215 info.si_pid = task_tgid_vnr(current);
2216 info.si_uid = current->uid;
2218 return kill_something_info(sig, &info, pid);
2221 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2224 struct siginfo info;
2225 struct task_struct *p;
2226 unsigned long flags;
2229 info.si_signo = sig;
2231 info.si_code = SI_TKILL;
2232 info.si_pid = task_tgid_vnr(current);
2233 info.si_uid = current->uid;
2236 p = find_task_by_vpid(pid);
2237 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2238 error = check_kill_permission(sig, &info, p);
2240 * The null signal is a permissions and process existence
2241 * probe. No signal is actually delivered.
2243 * If lock_task_sighand() fails we pretend the task dies
2244 * after receiving the signal. The window is tiny, and the
2245 * signal is private anyway.
2247 if (!error && sig && lock_task_sighand(p, &flags)) {
2248 error = specific_send_sig_info(sig, &info, p);
2249 unlock_task_sighand(p, &flags);
2258 * sys_tgkill - send signal to one specific thread
2259 * @tgid: the thread group ID of the thread
2260 * @pid: the PID of the thread
2261 * @sig: signal to be sent
2263 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2264 * exists but no longer belongs to the target process. This
2265 * method solves the problem of threads exiting and PIDs getting reused.
2267 asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2269 /* This is only valid for single tasks */
2270 if (pid <= 0 || tgid <= 0)
2273 return do_tkill(tgid, pid, sig);
2277 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2280 sys_tkill(pid_t pid, int sig)
2282 /* This is only valid for single tasks */
2286 return do_tkill(0, pid, sig);
2290 sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2294 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2297 /* Not even root can pretend to send signals from the kernel.
2298 Nor can they impersonate a kill(), which adds source info. */
2299 if (info.si_code >= 0)
2301 info.si_signo = sig;
2303 /* POSIX.1b doesn't mention process groups. */
2304 return kill_proc_info(sig, &info, pid);
2307 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2309 struct task_struct *t = current;
2310 struct k_sigaction *k;
2313 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2316 k = &t->sighand->action[sig-1];
2318 spin_lock_irq(&current->sighand->siglock);
2323 sigdelsetmask(&act->sa.sa_mask,
2324 sigmask(SIGKILL) | sigmask(SIGSTOP));
2328 * "Setting a signal action to SIG_IGN for a signal that is
2329 * pending shall cause the pending signal to be discarded,
2330 * whether or not it is blocked."
2332 * "Setting a signal action to SIG_DFL for a signal that is
2333 * pending and whose default action is to ignore the signal
2334 * (for example, SIGCHLD), shall cause the pending signal to
2335 * be discarded, whether or not it is blocked"
2337 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2339 sigaddset(&mask, sig);
2340 rm_from_queue_full(&mask, &t->signal->shared_pending);
2342 rm_from_queue_full(&mask, &t->pending);
2344 } while (t != current);
2348 spin_unlock_irq(&current->sighand->siglock);
2353 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2359 oss.ss_sp = (void __user *) current->sas_ss_sp;
2360 oss.ss_size = current->sas_ss_size;
2361 oss.ss_flags = sas_ss_flags(sp);
2370 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2371 || __get_user(ss_sp, &uss->ss_sp)
2372 || __get_user(ss_flags, &uss->ss_flags)
2373 || __get_user(ss_size, &uss->ss_size))
2377 if (on_sig_stack(sp))
2383 * Note - this code used to test ss_flags incorrectly;
2384 * old code may have been written using ss_flags==0
2385 * to mean ss_flags==SS_ONSTACK (as this was the only
2386 * way that worked) - this fix preserves that older mechanism.
2389 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2392 if (ss_flags == SS_DISABLE) {
2397 if (ss_size < MINSIGSTKSZ)
2401 current->sas_ss_sp = (unsigned long) ss_sp;
2402 current->sas_ss_size = ss_size;
2407 if (copy_to_user(uoss, &oss, sizeof(oss)))
2416 #ifdef __ARCH_WANT_SYS_SIGPENDING
2419 sys_sigpending(old_sigset_t __user *set)
2421 return do_sigpending(set, sizeof(*set));
2426 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2427 /* Some platforms have their own version with special arguments; others
2428 support only sys_rt_sigprocmask. */
2431 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2434 old_sigset_t old_set, new_set;
2438 if (copy_from_user(&new_set, set, sizeof(*set)))
2440 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2442 spin_lock_irq(&current->sighand->siglock);
2443 old_set = current->blocked.sig[0];
2451 sigaddsetmask(&current->blocked, new_set);
2454 sigdelsetmask(&current->blocked, new_set);
2457 current->blocked.sig[0] = new_set;
2461 recalc_sigpending();
2462 spin_unlock_irq(&current->sighand->siglock);
2468 old_set = current->blocked.sig[0];
2471 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2478 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2480 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2482 sys_rt_sigaction(int sig,
2483 const struct sigaction __user *act,
2484 struct sigaction __user *oact,
2487 struct k_sigaction new_sa, old_sa;
2490 /* XXX: Don't preclude handling different sized sigset_t's. */
2491 if (sigsetsize != sizeof(sigset_t))
2495 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2499 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2502 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2508 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2510 #ifdef __ARCH_WANT_SYS_SGETMASK
2513 * For backwards compatibility. Functionality superseded by sigprocmask.
2519 return current->blocked.sig[0];
2523 sys_ssetmask(int newmask)
2527 spin_lock_irq(&current->sighand->siglock);
2528 old = current->blocked.sig[0];
2530 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2532 recalc_sigpending();
2533 spin_unlock_irq(&current->sighand->siglock);
2537 #endif /* __ARCH_WANT_SGETMASK */
2539 #ifdef __ARCH_WANT_SYS_SIGNAL
2541 * For backwards compatibility. Functionality superseded by sigaction.
2543 asmlinkage unsigned long
2544 sys_signal(int sig, __sighandler_t handler)
2546 struct k_sigaction new_sa, old_sa;
2549 new_sa.sa.sa_handler = handler;
2550 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
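/*
 * i.e. historic SysV signal() semantics: the handler is reset to SIG_DFL
 * on delivery (SA_ONESHOT) and the signal is not blocked while its handler
 * runs (SA_NOMASK).
 */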
2551 sigemptyset(&new_sa.sa.sa_mask);
2553 ret = do_sigaction(sig, &new_sa, &old_sa);
2555 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2557 #endif /* __ARCH_WANT_SYS_SIGNAL */
2559 #ifdef __ARCH_WANT_SYS_PAUSE
2564 current->state = TASK_INTERRUPTIBLE;
2566 return -ERESTARTNOHAND;
2571 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2572 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2576 /* XXX: Don't preclude handling different sized sigset_t's. */
2577 if (sigsetsize != sizeof(sigset_t))
2580 if (copy_from_user(&newset, unewset, sizeof(newset)))
2582 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2584 spin_lock_irq(&current->sighand->siglock);
2585 current->saved_sigmask = current->blocked;
2586 current->blocked = newset;
2587 recalc_sigpending();
2588 spin_unlock_irq(&current->sighand->siglock);
2590 current->state = TASK_INTERRUPTIBLE;
2592 set_restore_sigmask();
2593 return -ERESTARTNOHAND;
2595 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2597 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2602 void __init signals_init(void)
2604 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);